Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/alg.go | 88
-rw-r--r--  src/runtime/arena.go | 4
-rw-r--r--  src/runtime/asm.s | 27
-rw-r--r--  src/runtime/asm_amd64.s | 7
-rw-r--r--  src/runtime/asm_arm64.s | 3
-rw-r--r--  src/runtime/asm_loong64.s | 98
-rw-r--r--  src/runtime/asm_mipsx.s | 37
-rw-r--r--  src/runtime/asm_ppc64x.s | 11
-rw-r--r--  src/runtime/atomic_pointer.go | 10
-rw-r--r--  src/runtime/badlinkname.go | 22
-rw-r--r--  src/runtime/badlinkname_linux.go | 17
-rw-r--r--  src/runtime/cgo.go | 19
-rw-r--r--  src/runtime/cgo/gcc_stack_darwin.c | 5
-rw-r--r--  src/runtime/cgo/gcc_stack_unix.c | 9
-rw-r--r--  src/runtime/cgocall.go | 40
-rw-r--r--  src/runtime/chan.go | 10
-rw-r--r--  src/runtime/chanbarrier_test.go | 14
-rw-r--r--  src/runtime/checkptr.go | 10
-rw-r--r--  src/runtime/coro.go | 68
-rw-r--r--  src/runtime/coro_test.go | 73
-rw-r--r--  src/runtime/coverage/apis.go | 178
-rw-r--r--  src/runtime/coverage/coverage.go | 66
-rw-r--r--  src/runtime/coverage/dummy.s | 8
-rw-r--r--  src/runtime/coverage/emit.go | 609
-rw-r--r--  src/runtime/coverage/emitdata_test.go | 550
-rw-r--r--  src/runtime/coverage/hooks.go | 42
-rw-r--r--  src/runtime/coverage/testdata/harness.go | 259
-rw-r--r--  src/runtime/coverage/testdata/issue56006/repro.go | 26
-rw-r--r--  src/runtime/coverage/testdata/issue56006/repro_test.go | 8
-rw-r--r--  src/runtime/coverage/testdata/issue59563/repro.go | 823
-rw-r--r--  src/runtime/coverage/testdata/issue59563/repro_test.go | 14
-rw-r--r--  src/runtime/coverage/testsupport.go | 323
-rw-r--r--  src/runtime/coverage/ts_test.go | 207
-rw-r--r--  src/runtime/covercounter.go | 4
-rw-r--r--  src/runtime/covermeta.go | 66
-rw-r--r--  src/runtime/cpuprof.go | 21
-rw-r--r--  src/runtime/crash_test.go | 32
-rw-r--r--  src/runtime/debug.go | 19
-rw-r--r--  src/runtime/debug/example_monitor_test.go | 2
-rw-r--r--  src/runtime/debug/garbage.go | 4
-rw-r--r--  src/runtime/debug/stack.go | 8
-rw-r--r--  src/runtime/debug/stack_test.go | 3
-rw-r--r--  src/runtime/ehooks_test.go | 46
-rw-r--r--  src/runtime/env_posix.go | 21
-rw-r--r--  src/runtime/error.go | 35
-rw-r--r--  src/runtime/exithook.go | 69
-rw-r--r--  src/runtime/export_debug_test.go | 3
-rw-r--r--  src/runtime/extern.go | 15
-rw-r--r--  src/runtime/gc_test.go | 20
-rw-r--r--  src/runtime/hash_test.go | 10
-rw-r--r--  src/runtime/iface.go | 45
-rw-r--r--  src/runtime/linkname.go | 34
-rw-r--r--  src/runtime/linkname_unix.go | 12
-rw-r--r--  src/runtime/lockrank.go | 3
-rw-r--r--  src/runtime/malloc.go | 66
-rw-r--r--  src/runtime/map.go | 175
-rw-r--r--  src/runtime/map_fast32.go | 29
-rw-r--r--  src/runtime/map_fast64.go | 31
-rw-r--r--  src/runtime/map_faststr.go | 20
-rw-r--r--  src/runtime/map_test.go | 6
-rw-r--r--  src/runtime/mbarrier.go | 60
-rw-r--r--  src/runtime/mbitmap.go | 9
-rw-r--r--  src/runtime/memclr_loong64.s | 4
-rw-r--r--  src/runtime/memmove_loong64.s | 5
-rw-r--r--  src/runtime/metrics/doc.go | 19
-rw-r--r--  src/runtime/metrics_test.go | 65
-rw-r--r--  src/runtime/mfinal.go | 8
-rw-r--r--  src/runtime/mgc.go | 24
-rw-r--r--  src/runtime/mgcscavenge.go | 2
-rw-r--r--  src/runtime/mgcsweep.go | 36
-rw-r--r--  src/runtime/mheap.go | 27
-rw-r--r--  src/runtime/mklockrank.go | 1
-rw-r--r--  src/runtime/mksizeclasses.go | 4
-rw-r--r--  src/runtime/mpallocbits.go | 1
-rw-r--r--  src/runtime/mprof.go | 418
-rw-r--r--  src/runtime/netpoll.go | 3
-rw-r--r--  src/runtime/netpoll_kqueue_event.go | 2
-rw-r--r--  src/runtime/os3_plan9.go | 3
-rw-r--r--  src/runtime/os_plan9.go | 13
-rw-r--r--  src/runtime/os_windows.go | 16
-rw-r--r--  src/runtime/pagetrace_off.go | 28
-rw-r--r--  src/runtime/pagetrace_on.go | 358
-rw-r--r--  src/runtime/panic.go | 70
-rw-r--r--  src/runtime/panic_test.go | 2
-rw-r--r--  src/runtime/pprof/label.go | 4
-rw-r--r--  src/runtime/pprof/pprof.go | 107
-rw-r--r--  src/runtime/pprof/pprof_test.go | 153
-rw-r--r--  src/runtime/pprof/protomem.go | 5
-rw-r--r--  src/runtime/pprof/protomem_test.go | 9
-rw-r--r--  src/runtime/preempt.go | 7
-rw-r--r--  src/runtime/proc.go | 235
-rw-r--r--  src/runtime/proflabel.go | 18
-rw-r--r--  src/runtime/race.go | 34
-rw-r--r--  src/runtime/race/testdata/rangefunc_test.go | 77
-rw-r--r--  src/runtime/race_amd64.s | 51
-rw-r--r--  src/runtime/race_arm64.s | 50
-rw-r--r--  src/runtime/race_ppc64le.s | 46
-rw-r--r--  src/runtime/race_s390x.s | 50
-rw-r--r--  src/runtime/rand.go | 27
-rw-r--r--  src/runtime/rt0_linux_ppc64le.s | 4
-rw-r--r--  src/runtime/runtime.go | 26
-rw-r--r--  src/runtime/runtime1.go | 65
-rw-r--r--  src/runtime/runtime2.go | 25
-rw-r--r--  src/runtime/runtime_test.go | 6
-rw-r--r--  src/runtime/security_unix.go | 6
-rw-r--r--  src/runtime/sema.go | 18
-rw-r--r--  src/runtime/signal_unix.go | 40
-rw-r--r--  src/runtime/sizeclasses.go | 1
-rw-r--r--  src/runtime/slice.go | 30
-rw-r--r--  src/runtime/stack.go | 14
-rw-r--r--  src/runtime/string.go | 20
-rw-r--r--  src/runtime/stubs.go | 100
-rw-r--r--  src/runtime/symtab.go | 126
-rw-r--r--  src/runtime/symtabinl.go | 28
-rw-r--r--  src/runtime/symtabinl_test.go | 7
-rw-r--r--  src/runtime/sys_darwin.go | 40
-rw-r--r--  src/runtime/sys_linux_ppc64x.s | 4
-rw-r--r--  src/runtime/sys_openbsd3.go | 20
-rw-r--r--  src/runtime/syscall_windows.go | 8
-rw-r--r--  src/runtime/testdata/testexithooks/testexithooks.go | 46
-rw-r--r--  src/runtime/testdata/testprog/coro.go | 185
-rw-r--r--  src/runtime/testdata/testprog/crash.go | 6
-rw-r--r--  src/runtime/testdata/testprog/panicprint.go | 2
-rw-r--r--  src/runtime/testdata/testprogcgo/coro.go | 185
-rw-r--r--  src/runtime/testdata/testprogcgo/issue29707.go | 60
-rw-r--r--  src/runtime/time_fake.go | 1
-rw-r--r--  src/runtime/time_nofake.go | 26
-rw-r--r--  src/runtime/timestub.go | 11
-rw-r--r--  src/runtime/trace.go | 41
-rw-r--r--  src/runtime/trace_cgo_test.go | 62
-rw-r--r--  src/runtime/traceallocfree.go | 162
-rw-r--r--  src/runtime/traceback.go | 15
-rw-r--r--  src/runtime/traceback_system_test.go | 2
-rw-r--r--  src/runtime/tracebuf.go | 13
-rw-r--r--  src/runtime/traceevent.go | 9
-rw-r--r--  src/runtime/traceexp.go | 68
-rw-r--r--  src/runtime/traceregion.go | 6
-rw-r--r--  src/runtime/traceruntime.go | 66
-rw-r--r--  src/runtime/tracestack.go | 38
-rw-r--r--  src/runtime/tracetime.go | 8
-rw-r--r--  src/runtime/tracetype.go | 82
-rw-r--r--  src/runtime/type.go | 18
-rw-r--r--  src/runtime/vdso_linux_amd64.go | 5
143 files changed, 3916 insertions, 4324 deletions
diff --git a/src/runtime/alg.go b/src/runtime/alg.go
index cab0abf577..bfb9fa1d29 100644
--- a/src/runtime/alg.go
+++ b/src/runtime/alg.go
@@ -45,9 +45,66 @@ func memhash_varlen(p unsafe.Pointer, h uintptr) uintptr {
var useAeshash bool
// in asm_*.s
+
+// memhash should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/aacfactory/fns
+// - github.com/dgraph-io/ristretto
+// - github.com/minio/simdjson-go
+// - github.com/nbd-wtf/go-nostr
+// - github.com/outcaste-io/ristretto
+// - github.com/puzpuzpuz/xsync/v2
+// - github.com/puzpuzpuz/xsync/v3
+// - github.com/segmentio/parquet-go
+// - github.com/parquet-go/parquet-go
+// - github.com/authzed/spicedb
+// - github.com/pingcap/badger
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname memhash
func memhash(p unsafe.Pointer, h, s uintptr) uintptr
+
+// memhash32 should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/segmentio/parquet-go
+// - github.com/parquet-go/parquet-go
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname memhash32
func memhash32(p unsafe.Pointer, h uintptr) uintptr
+
+// memhash64 should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/segmentio/parquet-go
+// - github.com/parquet-go/parquet-go
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname memhash64
func memhash64(p unsafe.Pointer, h uintptr) uintptr
+
+// strhash should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/aristanetworks/goarista
+// - github.com/bytedance/sonic
+// - github.com/bytedance/go-tagexpr/v2
+// - github.com/cloudwego/frugal
+// - github.com/cloudwego/dynamicgo
+// - github.com/v2fly/v2ray-core/v5
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname strhash
func strhash(p unsafe.Pointer, h uintptr) uintptr
func strhashFallback(a unsafe.Pointer, h uintptr) uintptr {
@@ -115,6 +172,16 @@ func interhash(p unsafe.Pointer, h uintptr) uintptr {
}
}
+// nilinterhash should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/anacrolix/stm
+// - github.com/aristanetworks/goarista
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname nilinterhash
func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
a := (*eface)(p)
t := a._type
@@ -142,6 +209,17 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
// maps generated by reflect.MapOf (reflect_typehash, below).
// Note: this function must match the compiler generated
// functions exactly. See issue 37716.
+//
+// typehash should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/puzpuzpuz/xsync/v2
+// - github.com/puzpuzpuz/xsync/v3
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname typehash
func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
if t.TFlag&abi.TFlagRegularMemory != 0 {
// Handle ptr sizes specially, see issue 37086.
@@ -342,6 +420,16 @@ func ifaceeq(tab *itab, x, y unsafe.Pointer) bool {
}
// Testing adapters for hash quality tests (see hash_test.go)
+//
+// stringHash should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/k14s/starlark-go
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname stringHash
func stringHash(s string, seed uintptr) uintptr {
return strhash(noescape(unsafe.Pointer(&s)), seed)
}
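
The push //go:linkname annotations added above exist because third-party packages already reach into the runtime with pull linknames. For context, a minimal sketch of that pull side (the package and function names here are hypothetical, and the package would also need an empty .s file so the compiler accepts the bodyless declaration):

package fasthash

import "unsafe"

// Pull the runtime's memhash via linkname. This only keeps linking in
// current toolchains because the runtime now carries a matching push
// //go:linkname memhash directive.
//
//go:linkname memhash runtime.memhash
func memhash(p unsafe.Pointer, h, s uintptr) uintptr

// Hash hashes b with the given seed using the runtime's internal memhash.
// Illustrative only; depending on runtime internals remains discouraged.
func Hash(b []byte, seed uintptr) uintptr {
	if len(b) == 0 {
		return memhash(nil, seed, 0)
	}
	return memhash(unsafe.Pointer(&b[0]), seed, uintptr(len(b)))
}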
diff --git a/src/runtime/arena.go b/src/runtime/arena.go
index bb88ed053d..47b131466c 100644
--- a/src/runtime/arena.go
+++ b/src/runtime/arena.go
@@ -828,10 +828,6 @@ func newUserArenaChunk() (unsafe.Pointer, *mspan) {
}
if debug.malloc {
- if debug.allocfreetrace != 0 {
- tracealloc(unsafe.Pointer(span.base()), userArenaChunkBytes, nil)
- }
-
if inittrace.active && inittrace.id == getg().goid {
// Init functions are executed sequentially in a single goroutine.
inittrace.bytes += uint64(userArenaChunkBytes)
diff --git a/src/runtime/asm.s b/src/runtime/asm.s
index 75b3013f4b..f487e44100 100644
--- a/src/runtime/asm.s
+++ b/src/runtime/asm.s
@@ -13,30 +13,3 @@ TEXT ·sigpanic0(SB),NOSPLIT,$0-0
TEXT ·mapinitnoop<ABIInternal>(SB),NOSPLIT,$0-0
RET
-#ifndef GOARCH_386
-#ifndef GOARCH_arm
-#ifndef GOARCH_amd64
-#ifndef GOARCH_arm64
-#ifndef GOARCH_loong64
-#ifndef GOARCH_mips64
-#ifndef GOARCH_mips64le
-#ifndef GOARCH_ppc64
-#ifndef GOARCH_ppc64le
-#ifndef GOARCH_riscv64
-#ifndef GOARCH_s390x
-#ifndef GOARCH_wasm
-// stub to appease shared build mode.
-TEXT ·switchToCrashStack0<ABIInternal>(SB),NOSPLIT,$0-0
- UNDEF
-#endif
-#endif
-#endif
-#endif
-#endif
-#endif
-#endif
-#endif
-#endif
-#endif
-#endif
-#endif
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index 1071d270c1..cdf9874a7f 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -371,8 +371,9 @@ bad_cpu: // show that the program requires a certain microarchitecture level.
CALL runtime·abort(SB)
RET
- // Prevent dead-code elimination of debugCallV2, which is
+ // Prevent dead-code elimination of debugCallV2 and debugPinnerV1, which are
// intended to be called by debuggers.
+ MOVQ $runtime·debugPinnerV1<ABIInternal>(SB), AX
MOVQ $runtime·debugCallV2<ABIInternal>(SB), AX
RET
@@ -456,6 +457,10 @@ goodm:
PUSHQ AX // open up space for fn's arg spill slot
MOVQ 0(DX), R12
CALL R12 // fn(g)
+ // The Windows native stack unwinder incorrectly classifies the next instruction
+ // as part of the function epilogue, producing a wrong call stack.
+ // Add a NOP to work around this issue. See go.dev/issue/67007.
+ BYTE $0x90
POPQ AX
JMP runtime·badmcall2(SB)
RET
diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s
index 6d77b08a1b..64a1880589 100644
--- a/src/runtime/asm_arm64.s
+++ b/src/runtime/asm_arm64.s
@@ -96,8 +96,9 @@ nocgo:
// start this M
BL runtime·mstart(SB)
- // Prevent dead-code elimination of debugCallV2, which is
+ // Prevent dead-code elimination of debugCallV2 and debugPinnerV1, which are
// intended to be called by debuggers.
+ MOVD $runtime·debugPinnerV1<ABIInternal>(SB), R0
MOVD $runtime·debugCallV2<ABIInternal>(SB), R0
MOVD $0, R0
diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s
index 7c5ecb8a01..c16b27a0f2 100644
--- a/src/runtime/asm_loong64.s
+++ b/src/runtime/asm_loong64.s
@@ -124,12 +124,7 @@ TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0
// Fn must never return. It should gogo(&g->sched)
// to keep running g.
TEXT runtime·mcall<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-8
-#ifdef GOEXPERIMENT_regabiargs
MOVV R4, REGCTXT
-#else
- MOVV fn+0(FP), REGCTXT
-#endif
-
// Save caller state in g->sched
MOVV R3, (g_sched+gobuf_sp)(g)
MOVV R1, (g_sched+gobuf_pc)(g)
@@ -693,7 +688,6 @@ TEXT ·checkASM(SB),NOSPLIT,$0-1
MOVB R19, ret+0(FP)
RET
-#ifdef GOEXPERIMENT_regabiargs
// spillArgs stores return values from registers to a *internal/abi.RegArgs in R25.
TEXT ·spillArgs(SB),NOSPLIT,$0-0
MOVV R4, (0*8)(R25)
@@ -765,13 +759,6 @@ TEXT ·unspillArgs(SB),NOSPLIT,$0-0
MOVD (30*8)(R25), F14
MOVD (31*8)(R25), F15
RET
-#else
-TEXT ·spillArgs(SB),NOSPLIT,$0-0
- RET
-
-TEXT ·unspillArgs(SB),NOSPLIT,$0-0
- RET
-#endif
// gcWriteBarrier informs the GC about heap pointer writes.
//
@@ -901,155 +888,70 @@ TEXT runtime·gcWriteBarrier8<ABIInternal>(SB),NOSPLIT,$0
// then tail call to the corresponding runtime handler.
// The tail call makes these stubs disappear in backtraces.
TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVV R20, R4
MOVV R21, R5
-#else
- MOVV R20, x+0(FP)
- MOVV R21, y+8(FP)
-#endif
JMP runtime·goPanicIndex<ABIInternal>(SB)
TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVV R20, R4
MOVV R21, R5
-#else
- MOVV R20, x+0(FP)
- MOVV R21, y+8(FP)
-#endif
JMP runtime·goPanicIndexU<ABIInternal>(SB)
TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVV R21, R4
MOVV R23, R5
-#else
- MOVV R21, x+0(FP)
- MOVV R23, y+8(FP)
-#endif
JMP runtime·goPanicSliceAlen<ABIInternal>(SB)
TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVV R21, R4
MOVV R23, R5
-#else
- MOVV R21, x+0(FP)
- MOVV R23, y+8(FP)
-#endif
JMP runtime·goPanicSliceAlenU<ABIInternal>(SB)
TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVV R21, R4
MOVV R23, R5
-#else
- MOVV R21, x+0(FP)
- MOVV R23, y+8(FP)
-#endif
JMP runtime·goPanicSliceAcap<ABIInternal>(SB)
TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVV R21, R4
MOVV R23, R5
-#else
- MOVV R21, x+0(FP)
- MOVV R23, y+8(FP)
-#endif
JMP runtime·goPanicSliceAcapU<ABIInternal>(SB)
TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVV R20, R4
MOVV R21, R5
-#else
- MOVV R20, x+0(FP)
- MOVV R21, y+8(FP)
-#endif
JMP runtime·goPanicSliceB<ABIInternal>(SB)
TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVV R20, R4
MOVV R21, R5
-#else
- MOVV R20, x+0(FP)
- MOVV R21, y+8(FP)
-#endif
JMP runtime·goPanicSliceBU<ABIInternal>(SB)
TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVV R23, R4
MOVV R24, R5
-#else
- MOVV R23, x+0(FP)
- MOVV R24, y+8(FP)
-#endif
JMP runtime·goPanicSlice3Alen<ABIInternal>(SB)
TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVV R23, R4
MOVV R24, R5
-#else
- MOVV R23, x+0(FP)
- MOVV R24, y+8(FP)
-#endif
JMP runtime·goPanicSlice3AlenU<ABIInternal>(SB)
TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVV R23, R4
MOVV R24, R5
-#else
- MOVV R23, x+0(FP)
- MOVV R24, y+8(FP)
-#endif
JMP runtime·goPanicSlice3Acap<ABIInternal>(SB)
TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVV R23, R4
MOVV R24, R5
-#else
- MOVV R23, x+0(FP)
- MOVV R24, y+8(FP)
-#endif
JMP runtime·goPanicSlice3AcapU<ABIInternal>(SB)
TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVV R21, R4
MOVV R23, R5
-#else
- MOVV R21, x+0(FP)
- MOVV R23, y+8(FP)
-#endif
JMP runtime·goPanicSlice3B<ABIInternal>(SB)
TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVV R21, R4
MOVV R23, R5
-#else
- MOVV R21, x+0(FP)
- MOVV R23, y+8(FP)
-#endif
JMP runtime·goPanicSlice3BU<ABIInternal>(SB)
TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVV R20, R4
MOVV R21, R5
-#else
- MOVV R20, x+0(FP)
- MOVV R21, y+8(FP)
-#endif
JMP runtime·goPanicSlice3C<ABIInternal>(SB)
TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVV R20, R4
MOVV R21, R5
-#else
- MOVV R20, x+0(FP)
- MOVV R21, y+8(FP)
-#endif
JMP runtime·goPanicSlice3CU<ABIInternal>(SB)
TEXT runtime·panicSliceConvert<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVV R23, R4
MOVV R24, R5
-#else
- MOVV R23, x+0(FP)
- MOVV R24, y+8(FP)
-#endif
JMP runtime·goPanicSliceConvert<ABIInternal>(SB)
diff --git a/src/runtime/asm_mipsx.s b/src/runtime/asm_mipsx.s
index eed4a05b38..ca95f22bd6 100644
--- a/src/runtime/asm_mipsx.s
+++ b/src/runtime/asm_mipsx.s
@@ -204,6 +204,29 @@ noswitch:
ADD $4, R29
JMP (R4)
+// func switchToCrashStack0(fn func())
+TEXT runtime·switchToCrashStack0(SB), NOSPLIT, $0-4
+ MOVW fn+0(FP), REGCTXT // context register
+ MOVW g_m(g), R2 // curm
+
+ // set g to gcrash
+ MOVW $runtime·gcrash(SB), g // g = &gcrash
+ CALL runtime·save_g(SB)
+ MOVW R2, g_m(g) // g.m = curm
+ MOVW g, m_g0(R2) // curm.g0 = g
+
+ // switch to crashstack
+ MOVW (g_stack+stack_hi)(g), R2
+ ADDU $(-4*8), R2, R29
+
+ // call target function
+ MOVW 0(REGCTXT), R25
+ JAL (R25)
+
+ // should never return
+ CALL runtime·abort(SB)
+ UNDEF
+
/*
* support for morestack
*/
@@ -217,6 +240,13 @@ noswitch:
// calling the scheduler calling newm calling gc), so we must
// record an argument size. For that purpose, it has no arguments.
TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
+ // Called from f.
+ // Set g->sched to context in f.
+ MOVW R29, (g_sched+gobuf_sp)(g)
+ MOVW R31, (g_sched+gobuf_pc)(g)
+ MOVW R3, (g_sched+gobuf_lr)(g)
+ MOVW REGCTXT, (g_sched+gobuf_ctxt)(g)
+
// Cannot grow scheduler stack (m->g0).
MOVW g_m(g), R7
MOVW m_g0(R7), R8
@@ -231,13 +261,6 @@ TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
JAL runtime·abort(SB)
// Called from f.
- // Set g->sched to context in f.
- MOVW R29, (g_sched+gobuf_sp)(g)
- MOVW R31, (g_sched+gobuf_pc)(g)
- MOVW R3, (g_sched+gobuf_lr)(g)
- MOVW REGCTXT, (g_sched+gobuf_ctxt)(g)
-
- // Called from f.
// Set m->morebuf to f's caller.
MOVW R3, (m_morebuf+gobuf_pc)(R7) // f's caller's PC
MOVW R29, (m_morebuf+gobuf_sp)(R7) // f's caller's SP
diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s
index 97c2a370bd..2b8c4d42a3 100644
--- a/src/runtime/asm_ppc64x.s
+++ b/src/runtime/asm_ppc64x.s
@@ -40,7 +40,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0
// If there is a _cgo_init, call it using the gcc ABI.
MOVD _cgo_init(SB), R12
- CMP R0, R12
+ CMP R12, $0
BEQ nocgo
#ifdef GO_PPC64X_HAS_FUNCDESC
@@ -98,9 +98,10 @@ nocgo:
// start this M
BL runtime·mstart(SB)
- // Prevent dead-code elimination of debugCallV2, which is
+ // Prevent dead-code elimination of debugCallV2 and debugPinnerV1, which are
// intended to be called by debuggers.
#ifdef GOARCH_ppc64le
+ MOVD $runtime·debugPinnerV1<ABIInternal>(SB), R31
MOVD $runtime·debugCallV2<ABIInternal>(SB), R31
#endif
MOVD R0, 0(R0)
@@ -465,7 +466,7 @@ callfn: \
#ifdef GOOS_aix \
/* AIX won't trigger a SIGSEGV if R11 = nil */ \
/* So it manually triggers it */ \
- CMP R0, R11 \
+ CMP R11, $0 \
BNE 2(PC) \
MOVD R0, 0(R0) \
#endif \
@@ -563,7 +564,7 @@ TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0
MOVD R0, (g_sched+gobuf_ret)(g)
// Assert ctxt is zero. See func save.
MOVD (g_sched+gobuf_ctxt)(g), R31
- CMP R0, R31
+ CMP R31, $0
BEQ 2(PC)
BL runtime·abort(SB)
RET
@@ -1234,7 +1235,7 @@ TEXT runtime·debugCallV2<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-0
CALL runtime·debugCallCheck(SB)
MOVD 40(R1), R22
XOR R0, R0
- CMP R22, R0
+ CMP R22, $0
BEQ good
MOVD 48(R1), R22
MOVD $8, R20
diff --git a/src/runtime/atomic_pointer.go b/src/runtime/atomic_pointer.go
index e3d17b5cf8..df067ede77 100644
--- a/src/runtime/atomic_pointer.go
+++ b/src/runtime/atomic_pointer.go
@@ -18,6 +18,16 @@ import (
// atomicwb performs a write barrier before an atomic pointer write.
// The caller should guard the call with "if writeBarrier.enabled".
//
+// atomicwb should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/gopkg
+// - github.com/songzhibin97/gkit
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname atomicwb
//go:nosplit
func atomicwb(ptr *unsafe.Pointer, new unsafe.Pointer) {
slot := (*uintptr)(unsafe.Pointer(ptr))
diff --git a/src/runtime/badlinkname.go b/src/runtime/badlinkname.go
new file mode 100644
index 0000000000..b195bebbda
--- /dev/null
+++ b/src/runtime/badlinkname.go
@@ -0,0 +1,22 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import _ "unsafe"
+
+// These should be internal details,
+// but widely used packages access them using linkname.
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+
+// Notable members of the hall of shame include:
+// - github.com/dgraph-io/ristretto
+// - github.com/outcaste-io/ristretto
+// - github.com/clubpay/ronykit
+//go:linkname cputicks
+
+// Notable members of the hall of shame include:
+// - gvisor.dev/gvisor (from assembly)
+//go:linkname sched
diff --git a/src/runtime/badlinkname_linux.go b/src/runtime/badlinkname_linux.go
new file mode 100644
index 0000000000..ad74528da2
--- /dev/null
+++ b/src/runtime/badlinkname_linux.go
@@ -0,0 +1,17 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || arm64
+
+package runtime
+
+import _ "unsafe"
+
+// As of Go 1.22, the symbols below are found to be pulled via
+// linkname in the wild. We provide a push linkname here, to
+// keep them accessible with pull linknames.
+// This may change in the future. Please do not depend on them
+// in new code.
+
+//go:linkname vdsoClockgettimeSym
diff --git a/src/runtime/cgo.go b/src/runtime/cgo.go
index 40c8c748d3..8285d87fcf 100644
--- a/src/runtime/cgo.go
+++ b/src/runtime/cgo.go
@@ -35,9 +35,28 @@ var (
)
// iscgo is set to true by the runtime/cgo package
+//
+// iscgo should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/ebitengine/purego
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname iscgo
var iscgo bool
// set_crosscall2 is set by the runtime/cgo package
+// set_crosscall2 should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/ebitengine/purego
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname set_crosscall2
var set_crosscall2 func()
// cgoHasExtraM is set on startup when an extra M is created for cgo.
diff --git a/src/runtime/cgo/gcc_stack_darwin.c b/src/runtime/cgo/gcc_stack_darwin.c
index 0a9038eb3b..28364c7420 100644
--- a/src/runtime/cgo/gcc_stack_darwin.c
+++ b/src/runtime/cgo/gcc_stack_darwin.c
@@ -15,6 +15,11 @@ x_cgo_getstackbound(uintptr bounds[2])
p = pthread_self();
addr = pthread_get_stackaddr_np(p); // high address (!)
size = pthread_get_stacksize_np(p);
+
+ // bounds points into the Go stack. TSAN can't see the synchronization
+ // in Go around stack reuse.
+ _cgo_tsan_acquire();
bounds[0] = (uintptr)addr - size;
bounds[1] = (uintptr)addr;
+ _cgo_tsan_release();
}
diff --git a/src/runtime/cgo/gcc_stack_unix.c b/src/runtime/cgo/gcc_stack_unix.c
index 67efd9bc63..fcb03d0dea 100644
--- a/src/runtime/cgo/gcc_stack_unix.c
+++ b/src/runtime/cgo/gcc_stack_unix.c
@@ -18,6 +18,9 @@ x_cgo_getstackbound(uintptr bounds[2])
void *addr;
size_t size;
+ // Needed before pthread_getattr_np, too, since before glibc 2.32
+ // it did not call pthread_attr_init in all cases (see #65625).
+ pthread_attr_init(&attr);
#if defined(__GLIBC__) || (defined(__sun) && !defined(__illumos__))
// pthread_getattr_np is a GNU extension supported in glibc.
// Solaris is not glibc but does support pthread_getattr_np
@@ -25,18 +28,20 @@ x_cgo_getstackbound(uintptr bounds[2])
pthread_getattr_np(pthread_self(), &attr); // GNU extension
pthread_attr_getstack(&attr, &addr, &size); // low address
#elif defined(__illumos__)
- pthread_attr_init(&attr);
pthread_attr_get_np(pthread_self(), &attr);
pthread_attr_getstack(&attr, &addr, &size); // low address
#else
// We don't know how to get the current stacks, so assume they are the
// same as the default stack bounds.
- pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
addr = __builtin_frame_address(0) + 4096 - size;
#endif
pthread_attr_destroy(&attr);
+ // bounds points into the Go stack. TSAN can't see the synchronization
+ // in Go around stack reuse.
+ _cgo_tsan_acquire();
bounds[0] = (uintptr)addr;
bounds[1] = (uintptr)addr + size;
+ _cgo_tsan_release();
}
diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go
index 8f09b6831b..b943b1c2d6 100644
--- a/src/runtime/cgocall.go
+++ b/src/runtime/cgocall.go
@@ -121,6 +121,15 @@ var ncgocall uint64 // number of cgo calls in total for dead m
// platforms. Syscalls may have untyped arguments on the stack, so
// it's not safe to grow or scan the stack.
//
+// cgocall should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/ebitengine/purego
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname cgocall
//go:nosplit
func cgocall(fn, arg unsafe.Pointer) int32 {
if !iscgo && GOOS != "solaris" && GOOS != "illumos" && GOOS != "windows" {
@@ -221,15 +230,18 @@ func cgocall(fn, arg unsafe.Pointer) int32 {
//go:nosplit
func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) {
g0 := mp.g0
- if sp > g0.stack.lo && sp <= g0.stack.hi {
- // Stack already in bounds, nothing to do.
- return
- }
- if mp.ncgo > 0 {
+ inBound := sp > g0.stack.lo && sp <= g0.stack.hi
+ if mp.ncgo > 0 && !inBound {
// ncgo > 0 indicates that this M was in Go further up the stack
- // (it called C and is now receiving a callback). It is not
- // safe for the C call to change the stack out from under us.
+ // (it called C and is now receiving a callback).
+ //
+ // !inBound indicates that we were called with SP outside the
+ // expected system stack bounds (C changed the stack out from
+ // under us between the cgocall and cgocallback?).
+ //
+ // It is not safe for the C call to change the stack out from
+ // under us, so throw.
// Note that this case isn't possible for signal == true, as
// that is always passing a new M from needm.
@@ -247,12 +259,26 @@ func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) {
exit(2)
}
+ if !mp.isextra {
+ // We allocated the stack for standard Ms. Don't replace the
+ // stack bounds with estimated ones when we already initialized
+ // with the exact ones.
+ return
+ }
+
// This M does not have Go further up the stack. However, it may have
// previously called into Go, initializing the stack bounds. Between
// that call returning and now the stack may have changed (perhaps the
// C thread is running a coroutine library). We need to update the
// stack bounds for this case.
//
+ // N.B. we need to update the stack bounds even if SP appears to
+ // already be in bounds. Our "bounds" may actually be estimated dummy
+ // bounds (below). The actual stack bounds could have shifted but still
+ // have partial overlap with our dummy bounds. If we failed to update
+ // in that case, we could find ourselves seemingly called near the
+ // bottom of the stack bounds, where we quickly run out of space.
+
// Set the stack bounds to match the current stack. If we don't
// actually know how big the stack is, like we don't know how big any
// scheduling stack is, but we assume there's at least 32 kB. If we
diff --git a/src/runtime/chan.go b/src/runtime/chan.go
index 6ce824f62c..f1cd74a3fd 100644
--- a/src/runtime/chan.go
+++ b/src/runtime/chan.go
@@ -120,6 +120,16 @@ func makechan(t *chantype, size int) *hchan {
}
// chanbuf(c, i) is pointer to the i'th slot in the buffer.
+//
+// chanbuf should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/fjl/memsize
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname chanbuf
func chanbuf(c *hchan, i uint) unsafe.Pointer {
return add(c.buf, uintptr(i)*uintptr(c.elemsize))
}
diff --git a/src/runtime/chanbarrier_test.go b/src/runtime/chanbarrier_test.go
index d4795748bf..a85984d2d7 100644
--- a/src/runtime/chanbarrier_test.go
+++ b/src/runtime/chanbarrier_test.go
@@ -45,16 +45,17 @@ func doRequest(useSelect bool) (*response, error) {
}
func TestChanSendSelectBarrier(t *testing.T) {
+ t.Parallel()
testChanSendBarrier(true)
}
func TestChanSendBarrier(t *testing.T) {
+ t.Parallel()
testChanSendBarrier(false)
}
func testChanSendBarrier(useSelect bool) {
var wg sync.WaitGroup
- var globalMu sync.Mutex
outer := 100
inner := 100000
if testing.Short() || runtime.GOARCH == "wasm" {
@@ -72,12 +73,15 @@ func testChanSendBarrier(useSelect bool) {
if !ok {
panic(1)
}
- garbage = make([]byte, 1<<10)
+ garbage = makeByte()
}
- globalMu.Lock()
- global = garbage
- globalMu.Unlock()
+ _ = garbage
}()
}
wg.Wait()
}
+
+//go:noinline
+func makeByte() []byte {
+ return make([]byte, 1<<10)
+}
diff --git a/src/runtime/checkptr.go b/src/runtime/checkptr.go
index 810787bff5..be64ae7f0c 100644
--- a/src/runtime/checkptr.go
+++ b/src/runtime/checkptr.go
@@ -76,6 +76,16 @@ func checkptrArithmetic(p unsafe.Pointer, originals []unsafe.Pointer) {
// checkptrBase(p1) == checkptrBase(p2). However, the converse/inverse
// is not necessarily true as allocations can have trailing padding,
// and multiple variables may be packed into a single allocation.
+//
+// checkptrBase should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname checkptrBase
func checkptrBase(p unsafe.Pointer) uintptr {
// stack
if gp := getg(); gp.stack.lo <= uintptr(p) && uintptr(p) < gp.stack.hi {
diff --git a/src/runtime/coro.go b/src/runtime/coro.go
index 98e789f133..30ada455e4 100644
--- a/src/runtime/coro.go
+++ b/src/runtime/coro.go
@@ -24,6 +24,11 @@ import "unsafe"
type coro struct {
gp guintptr
f func(*coro)
+
+ // State for validating thread-lock interactions.
+ mp *m
+ lockedExt uint32 // mp's external LockOSThread counter at coro creation time.
+ lockedInt uint32 // mp's internal lockOSThread counter at coro creation time.
}
//go:linkname newcoro
@@ -37,17 +42,24 @@ func newcoro(f func(*coro)) *coro {
pc := getcallerpc()
gp := getg()
systemstack(func() {
+ mp := gp.m
start := corostart
startfv := *(**funcval)(unsafe.Pointer(&start))
gp = newproc1(startfv, gp, pc, true, waitReasonCoroutine)
+
+ // Scribble down locked thread state if needed and/or donate
+ // thread-lock state to the new goroutine.
+ if mp.lockedExt+mp.lockedInt != 0 {
+ c.mp = mp
+ c.lockedExt = mp.lockedExt
+ c.lockedInt = mp.lockedInt
+ }
})
gp.coroarg = c
c.gp.set(gp)
return c
}
-//go:linkname corostart
-
// corostart is the entry func for a new coroutine.
// It runs the coroutine user function f passed to corostart
// and then calls coroexit to remove the extra concurrency.
@@ -56,8 +68,8 @@ func corostart() {
c := gp.coroarg
gp.coroarg = nil
+ defer coroexit(c)
c.f(c)
- coroexit(c)
}
// coroexit is like coroswitch but closes the coro
@@ -92,17 +104,28 @@ func coroswitch(c *coro) {
// It is important not to add more atomic operations or other
// expensive operations to the fast path.
func coroswitch_m(gp *g) {
- // TODO(go.dev/issue/65889): Something really nasty will happen if either
- // goroutine in this handoff tries to lock itself to an OS thread.
- // There's an explicit multiplexing going on here that needs to be
- // disabled if either the consumer or the iterator ends up in such
- // a state.
c := gp.coroarg
gp.coroarg = nil
exit := gp.coroexit
gp.coroexit = false
mp := gp.m
+ // Track and validate thread-lock interactions.
+ //
+ // The rules with thread-lock interactions are simple. When a coro goroutine is switched to,
+ // the same thread must be used, and the locked state must match with the thread-lock state of
+ // the goroutine which called newcoro. Thread-lock state consists of the thread and the number
+ // of internal (cgo callback, etc.) and external (LockOSThread) thread locks.
+ locked := gp.lockedm != 0
+ if c.mp != nil || locked {
+ if mp != c.mp || mp.lockedInt != c.lockedInt || mp.lockedExt != c.lockedExt {
+ print("coro: got thread ", unsafe.Pointer(mp), ", want ", unsafe.Pointer(c.mp), "\n")
+ print("coro: got lock internal ", mp.lockedInt, ", want ", c.lockedInt, "\n")
+ print("coro: got lock external ", mp.lockedExt, ", want ", c.lockedExt, "\n")
+ throw("coro: OS thread locking must match locking at coroutine creation")
+ }
+ }
+
// Acquire tracer for writing for the duration of this call.
//
// There's a lot of state manipulation performed with shortcuts
@@ -111,11 +134,18 @@ func coroswitch_m(gp *g) {
// emitting an event for every single transition.
trace := traceAcquire()
+ if locked {
+ // Detach the goroutine from the thread; we'll attach to the goroutine we're
+ // switching to before returning.
+ gp.lockedm.set(nil)
+ }
+
if exit {
- // TODO(65889): If we're locked to the current OS thread and
- // we exit here while tracing is enabled, we're going to end up
- // in a really bad place (traceAcquire also calls acquirem; there's
- // no releasem before the thread exits).
+ // The M might have a non-zero OS thread lock count when we get here, gdestroy
+ // will avoid destroying the M if the G isn't explicitly locked to it via lockedm,
+ // which we cleared above. It's fine to gdestroy here also, even when locked to
+ // the thread, because we'll be switching back to another goroutine anyway, which
+ // will take back its thread-lock state before returning.
gdestroy(gp)
gp = nil
} else {
@@ -158,6 +188,14 @@ func coroswitch_m(gp *g) {
}
}
+ // Check if we're switching to ourselves. This case is able to break our
+ // thread-lock invariants and an unbuffered channel implementation of
+ // coroswitch would deadlock. It's clear that this case should just not
+ // work.
+ if gnext == gp {
+ throw("coroswitch of a goroutine to itself")
+ }
+
// Emit the trace event after getting gnext but before changing curg.
// GoSwitch expects that the current G is running and that we haven't
// switched yet for correct status emission.
@@ -177,6 +215,12 @@ func coroswitch_m(gp *g) {
casgstatus(gnext, _Grunnable, _Grunning)
}
+ // Donate locked state.
+ if locked {
+ mp.lockedg.set(gnext)
+ gnext.lockedm.set(mp)
+ }
+
// Release the trace locker. We've completed all the necessary transitions..
if trace.ok() {
traceRelease(trace)
diff --git a/src/runtime/coro_test.go b/src/runtime/coro_test.go
new file mode 100644
index 0000000000..10b5e1ea08
--- /dev/null
+++ b/src/runtime/coro_test.go
@@ -0,0 +1,73 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "internal/testenv"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+func TestCoroLockOSThread(t *testing.T) {
+ for _, test := range []string{
+ "CoroLockOSThreadIterLock",
+ "CoroLockOSThreadIterLockYield",
+ "CoroLockOSThreadLock",
+ "CoroLockOSThreadLockIterNested",
+ "CoroLockOSThreadLockIterLock",
+ "CoroLockOSThreadLockIterLockYield",
+ "CoroLockOSThreadLockIterYieldNewG",
+ "CoroLockOSThreadLockAfterPull",
+ "CoroLockOSThreadStopLocked",
+ "CoroLockOSThreadStopLockedIterNested",
+ } {
+ t.Run(test, func(t *testing.T) {
+ checkCoroTestProgOutput(t, runTestProg(t, "testprog", test))
+ })
+ }
+}
+
+func TestCoroCgoCallback(t *testing.T) {
+ testenv.MustHaveCGO(t)
+ if runtime.GOOS == "windows" {
+ t.Skip("coro cgo callback tests not supported on Windows")
+ }
+ for _, test := range []string{
+ "CoroCgoIterCallback",
+ "CoroCgoIterCallbackYield",
+ "CoroCgoCallback",
+ "CoroCgoCallbackIterNested",
+ "CoroCgoCallbackIterCallback",
+ "CoroCgoCallbackIterCallbackYield",
+ "CoroCgoCallbackAfterPull",
+ "CoroCgoStopCallback",
+ "CoroCgoStopCallbackIterNested",
+ } {
+ t.Run(test, func(t *testing.T) {
+ checkCoroTestProgOutput(t, runTestProg(t, "testprogcgo", test))
+ })
+ }
+}
+
+func checkCoroTestProgOutput(t *testing.T, output string) {
+ t.Helper()
+
+ c := strings.SplitN(output, "\n", 2)
+ if len(c) == 1 {
+ t.Fatalf("expected at least one complete line in the output, got:\n%s", output)
+ }
+ expect, ok := strings.CutPrefix(c[0], "expect: ")
+ if !ok {
+ t.Fatalf("expected first line of output to start with \"expect: \", got: %q", c[0])
+ }
+ rest := c[1]
+ if expect == "OK" && rest != "OK\n" {
+ t.Fatalf("expected just 'OK' in the output, got:\n%s", rest)
+ }
+ if !strings.Contains(rest, expect) {
+ t.Fatalf("expected %q in the output, got:\n%s", expect, rest)
+ }
+}
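
The checker above implies a simple output protocol for the test programs (the coro.go files under testdata in the diffstat): the first line names the expected text, the remaining output must contain it, and "expect: OK" demands exactly "OK". A minimal, hypothetical program following that protocol:

package main

// The first output line declares the string the checker should find in
// the rest of the output; "expect: OK" means the remainder must be
// exactly "OK\n".
func main() {
	println("expect: OK")
	// ... exercise the coroutine/LockOSThread scenario under test ...
	println("OK")
}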
diff --git a/src/runtime/coverage/apis.go b/src/runtime/coverage/apis.go
deleted file mode 100644
index 15ba04a86f..0000000000
--- a/src/runtime/coverage/apis.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package coverage
-
-import (
- "fmt"
- "internal/coverage"
- "io"
- "sync/atomic"
- "unsafe"
-)
-
-// WriteMetaDir writes a coverage meta-data file for the currently
-// running program to the directory specified in 'dir'. An error will
-// be returned if the operation can't be completed successfully (for
-// example, if the currently running program was not built with
-// "-cover", or if the directory does not exist).
-func WriteMetaDir(dir string) error {
- if !finalHashComputed {
- return fmt.Errorf("error: no meta-data available (binary not built with -cover?)")
- }
- return emitMetaDataToDirectory(dir, getCovMetaList())
-}
-
-// WriteMeta writes the meta-data content (the payload that would
-// normally be emitted to a meta-data file) for the currently running
-// program to the writer 'w'. An error will be returned if the
-// operation can't be completed successfully (for example, if the
-// currently running program was not built with "-cover", or if a
-// write fails).
-func WriteMeta(w io.Writer) error {
- if w == nil {
- return fmt.Errorf("error: nil writer in WriteMeta")
- }
- if !finalHashComputed {
- return fmt.Errorf("error: no meta-data available (binary not built with -cover?)")
- }
- ml := getCovMetaList()
- return writeMetaData(w, ml, cmode, cgran, finalHash)
-}
-
-// WriteCountersDir writes a coverage counter-data file for the
-// currently running program to the directory specified in 'dir'. An
-// error will be returned if the operation can't be completed
-// successfully (for example, if the currently running program was not
-// built with "-cover", or if the directory does not exist). The
-// counter data written will be a snapshot taken at the point of the
-// call.
-func WriteCountersDir(dir string) error {
- if cmode != coverage.CtrModeAtomic {
- return fmt.Errorf("WriteCountersDir invoked for program built with -covermode=%s (please use -covermode=atomic)", cmode.String())
- }
- return emitCounterDataToDirectory(dir)
-}
-
-// WriteCounters writes coverage counter-data content for the
-// currently running program to the writer 'w'. An error will be
-// returned if the operation can't be completed successfully (for
-// example, if the currently running program was not built with
-// "-cover", or if a write fails). The counter data written will be a
-// snapshot taken at the point of the invocation.
-func WriteCounters(w io.Writer) error {
- if w == nil {
- return fmt.Errorf("error: nil writer in WriteCounters")
- }
- if cmode != coverage.CtrModeAtomic {
- return fmt.Errorf("WriteCounters invoked for program built with -covermode=%s (please use -covermode=atomic)", cmode.String())
- }
- // Ask the runtime for the list of coverage counter symbols.
- cl := getCovCounterList()
- if len(cl) == 0 {
- return fmt.Errorf("program not built with -cover")
- }
- if !finalHashComputed {
- return fmt.Errorf("meta-data not written yet, unable to write counter data")
- }
-
- pm := getCovPkgMap()
- s := &emitState{
- counterlist: cl,
- pkgmap: pm,
- }
- return s.emitCounterDataToWriter(w)
-}
-
-// ClearCounters clears/resets all coverage counter variables in the
-// currently running program. It returns an error if the program in
-// question was not built with the "-cover" flag. Clearing of coverage
-// counters is also not supported for programs not using atomic
-// counter mode (see more detailed comments below for the rationale
-// here).
-func ClearCounters() error {
- cl := getCovCounterList()
- if len(cl) == 0 {
- return fmt.Errorf("program not built with -cover")
- }
- if cmode != coverage.CtrModeAtomic {
- return fmt.Errorf("ClearCounters invoked for program built with -covermode=%s (please use -covermode=atomic)", cmode.String())
- }
-
- // Implementation note: this function would be faster and simpler
- // if we could just zero out the entire counter array, but for the
- // moment we go through and zero out just the slots in the array
- // corresponding to the counter values. We do this to avoid the
- // following bad scenario: suppose that a user builds their Go
- // program with "-cover", and that program has a function (call it
- // main.XYZ) that invokes ClearCounters:
- //
- // func XYZ() {
- // ... do some stuff ...
- // coverage.ClearCounters()
- // if someCondition { <<--- HERE
- // ...
- // }
- // }
- //
- // At the point where ClearCounters executes, main.XYZ has not yet
- // finished running, thus as soon as the call returns the line
- // marked "HERE" above will trigger the writing of a non-zero
- // value into main.XYZ's counter slab. However since we've just
- // finished clearing the entire counter segment, we will have lost
- // the values in the prolog portion of main.XYZ's counter slab
- // (nctrs, pkgid, funcid). This means that later on at the end of
- // program execution as we walk through the entire counter array
- // for the program looking for executed functions, we'll zoom past
- // main.XYZ's prolog (which was zero'd) and hit the non-zero
- // counter value corresponding to the "HERE" block, which will
- // then be interpreted as the start of another live function.
- // Things will go downhill from there.
- //
- // This same scenario is also a potential risk if the program is
- // running on an architecture that permits reordering of
- // writes/stores, since the inconsistency described above could
- // arise here. Example scenario:
- //
- // func ABC() {
- // ... // prolog
- // if alwaysTrue() {
- // XYZ() // counter update here
- // }
- // }
- //
- // In the instrumented version of ABC, the prolog of the function
- // will contain a series of stores to the initial portion of the
- // counter array to write number-of-counters, pkgid, funcid. Later
- // in the function there is also a store to increment a counter
- // for the block containing the call to XYZ(). If the CPU is
- // allowed to reorder stores and decides to issue the XYZ store
- // before the prolog stores, this could be observable as an
- // inconsistency similar to the one above. Hence the requirement
- // for atomic counter mode: according to package atomic docs,
- // "...operations that happen in a specific order on one thread,
- // will always be observed to happen in exactly that order by
- // another thread". Thus we can be sure that there will be no
- // inconsistency when reading the counter array from the thread
- // running ClearCounters.
-
- for _, c := range cl {
- sd := unsafe.Slice((*atomic.Uint32)(unsafe.Pointer(c.Counters)), int(c.Len))
- for i := 0; i < len(sd); i++ {
- // Skip ahead until the next non-zero value.
- sdi := sd[i].Load()
- if sdi == 0 {
- continue
- }
- // We found a function that was executed; clear its counters.
- nCtrs := sdi
- for j := 0; j < int(nCtrs); j++ {
- sd[i+coverage.FirstCtrOffset+j].Store(0)
- }
- // Move to next function.
- i += coverage.FirstCtrOffset + int(nCtrs) - 1
- }
- }
- return nil
-}
diff --git a/src/runtime/coverage/coverage.go b/src/runtime/coverage/coverage.go
new file mode 100644
index 0000000000..6b99a0bce6
--- /dev/null
+++ b/src/runtime/coverage/coverage.go
@@ -0,0 +1,66 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package coverage
+
+import (
+ "internal/coverage/cfile"
+ "io"
+)
+
+// initHook is invoked from main.init in programs built with -cover.
+// The call is emitted by the compiler.
+func initHook(istest bool) {
+ cfile.InitHook(istest)
+}
+
+// WriteMetaDir writes a coverage meta-data file for the currently
+// running program to the directory specified in 'dir'. An error will
+// be returned if the operation can't be completed successfully (for
+// example, if the currently running program was not built with
+// "-cover", or if the directory does not exist).
+func WriteMetaDir(dir string) error {
+ return cfile.WriteMetaDir(dir)
+}
+
+// WriteMeta writes the meta-data content (the payload that would
+// normally be emitted to a meta-data file) for the currently running
+// program to the writer 'w'. An error will be returned if the
+// operation can't be completed successfully (for example, if the
+// currently running program was not built with "-cover", or if a
+// write fails).
+func WriteMeta(w io.Writer) error {
+ return cfile.WriteMeta(w)
+}
+
+// WriteCountersDir writes a coverage counter-data file for the
+// currently running program to the directory specified in 'dir'. An
+// error will be returned if the operation can't be completed
+// successfully (for example, if the currently running program was not
+// built with "-cover", or if the directory does not exist). The
+// counter data written will be a snapshot taken at the point of the
+// call.
+func WriteCountersDir(dir string) error {
+ return cfile.WriteCountersDir(dir)
+}
+
+// WriteCounters writes coverage counter-data content for the
+// currently running program to the writer 'w'. An error will be
+// returned if the operation can't be completed successfully (for
+// example, if the currently running program was not built with
+// "-cover", or if a write fails). The counter data written will be a
+// snapshot taken at the point of the invocation.
+func WriteCounters(w io.Writer) error {
+ return cfile.WriteCounters(w)
+}
+
+// ClearCounters clears/resets all coverage counter variables in the
+// currently running program. It returns an error if the program in
+// question was not built with the "-cover" flag. Clearing of coverage
+// counters is also not supported for programs not using atomic
+// counter mode (see more detailed comments below for the rationale
+// here).
+func ClearCounters() error {
+ return cfile.ClearCounters()
+}
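
With the implementation moved to internal/coverage/cfile, these wrappers remain the public entry points. A sketch of how a long-running binary built with -cover (and -covermode=atomic for the counter calls) might snapshot coverage data on demand; the output directory here is hypothetical:

package main

import (
	"log"
	"os"
	"runtime/coverage"
)

// dumpCoverage writes a meta-data file and a counter-data snapshot to dir
// using the public runtime/coverage API.
func dumpCoverage(dir string) {
	if err := os.MkdirAll(dir, 0o755); err != nil {
		log.Fatal(err)
	}
	if err := coverage.WriteMetaDir(dir); err != nil {
		log.Printf("meta-data: %v", err) // e.g. binary not built with -cover
	}
	if err := coverage.WriteCountersDir(dir); err != nil {
		log.Printf("counters: %v", err) // requires -covermode=atomic
	}
}

func main() {
	dumpCoverage("/tmp/covdata")
}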
diff --git a/src/runtime/coverage/dummy.s b/src/runtime/coverage/dummy.s
deleted file mode 100644
index 75928593a0..0000000000
--- a/src/runtime/coverage/dummy.s
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// The runtime package uses //go:linkname to push a few functions into this
-// package but we still need a .s file so the Go tool does not pass -complete
-// to 'go tool compile' so the latter does not complain about Go functions
-// with no bodies.
diff --git a/src/runtime/coverage/emit.go b/src/runtime/coverage/emit.go
deleted file mode 100644
index 6fe04daea8..0000000000
--- a/src/runtime/coverage/emit.go
+++ /dev/null
@@ -1,609 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package coverage
-
-import (
- "crypto/md5"
- "fmt"
- "internal/coverage"
- "internal/coverage/encodecounter"
- "internal/coverage/encodemeta"
- "internal/coverage/rtcov"
- "io"
- "os"
- "path/filepath"
- "runtime"
- "strconv"
- "sync/atomic"
- "time"
- "unsafe"
-)
-
-// This file contains functions that support the writing of data files
-// emitted at the end of code coverage testing runs, from instrumented
-// executables.
-
-// getCovMetaList returns a list of meta-data blobs registered
-// for the currently executing instrumented program. It is defined in the
-// runtime.
-func getCovMetaList() []rtcov.CovMetaBlob
-
-// getCovCounterList returns a list of counter-data blobs registered
-// for the currently executing instrumented program. It is defined in the
-// runtime.
-func getCovCounterList() []rtcov.CovCounterBlob
-
-// getCovPkgMap returns a map storing the remapped package IDs for
-// hard-coded runtime packages (see internal/coverage/pkgid.go for
-// more on why hard-coded package IDs are needed). This function
-// is defined in the runtime.
-func getCovPkgMap() map[int]int
-
-// emitState holds useful state information during the emit process.
-//
-// When an instrumented program finishes execution and starts the
-// process of writing out coverage data, it's possible that an
-// existing meta-data file already exists in the output directory. In
-// this case openOutputFiles() below will leave the 'mf' field below
-// as nil. If a new meta-data file is needed, field 'mfname' will be
-// the final desired path of the meta file, 'mftmp' will be a
-// temporary file, and 'mf' will be an open os.File pointer for
-// 'mftmp'. The meta-data file payload will be written to 'mf', the
-// temp file will be then closed and renamed (from 'mftmp' to
-// 'mfname'), so as to insure that the meta-data file is created
-// atomically; we want this so that things work smoothly in cases
-// where there are several instances of a given instrumented program
-// all terminating at the same time and trying to create meta-data
-// files simultaneously.
-//
-// For counter data files there is less chance of a collision, hence
-// the openOutputFiles() stores the counter data file in 'cfname' and
-// then places the *io.File into 'cf'.
-type emitState struct {
- mfname string // path of final meta-data output file
- mftmp string // path to meta-data temp file (if needed)
- mf *os.File // open os.File for meta-data temp file
- cfname string // path of final counter data file
- cftmp string // path to counter data temp file
- cf *os.File // open os.File for counter data file
- outdir string // output directory
-
- // List of meta-data symbols obtained from the runtime
- metalist []rtcov.CovMetaBlob
-
- // List of counter-data symbols obtained from the runtime
- counterlist []rtcov.CovCounterBlob
-
- // Table to use for remapping hard-coded pkg ids.
- pkgmap map[int]int
-
- // emit debug trace output
- debug bool
-}
-
-var (
- // finalHash is computed at init time from the list of meta-data
- // symbols registered during init. It is used both for writing the
- // meta-data file and counter-data files.
- finalHash [16]byte
- // Set to true when we've computed finalHash + finalMetaLen.
- finalHashComputed bool
- // Total meta-data length.
- finalMetaLen uint64
- // Records whether we've already attempted to write meta-data.
- metaDataEmitAttempted bool
- // Counter mode for this instrumented program run.
- cmode coverage.CounterMode
- // Counter granularity for this instrumented program run.
- cgran coverage.CounterGranularity
- // Cached value of GOCOVERDIR environment variable.
- goCoverDir string
- // Copy of os.Args made at init time, converted into map format.
- capturedOsArgs map[string]string
- // Flag used in tests to signal that coverage data already written.
- covProfileAlreadyEmitted bool
-)
-
-// fileType is used to select between counter-data files and
-// meta-data files.
-type fileType int
-
-const (
- noFile = 1 << iota
- metaDataFile
- counterDataFile
-)
-
-// emitMetaData emits the meta-data output file for this coverage run.
-// This entry point is intended to be invoked by the compiler from
-// an instrumented program's main package init func.
-func emitMetaData() {
- if covProfileAlreadyEmitted {
- return
- }
- ml, err := prepareForMetaEmit()
- if err != nil {
- fmt.Fprintf(os.Stderr, "error: coverage meta-data prep failed: %v\n", err)
- if os.Getenv("GOCOVERDEBUG") != "" {
- panic("meta-data write failure")
- }
- }
- if len(ml) == 0 {
- fmt.Fprintf(os.Stderr, "program not built with -cover\n")
- return
- }
-
- goCoverDir = os.Getenv("GOCOVERDIR")
- if goCoverDir == "" {
- fmt.Fprintf(os.Stderr, "warning: GOCOVERDIR not set, no coverage data emitted\n")
- return
- }
-
- if err := emitMetaDataToDirectory(goCoverDir, ml); err != nil {
- fmt.Fprintf(os.Stderr, "error: coverage meta-data emit failed: %v\n", err)
- if os.Getenv("GOCOVERDEBUG") != "" {
- panic("meta-data write failure")
- }
- }
-}
-
-func modeClash(m coverage.CounterMode) bool {
- if m == coverage.CtrModeRegOnly || m == coverage.CtrModeTestMain {
- return false
- }
- if cmode == coverage.CtrModeInvalid {
- cmode = m
- return false
- }
- return cmode != m
-}
-
-func granClash(g coverage.CounterGranularity) bool {
- if cgran == coverage.CtrGranularityInvalid {
- cgran = g
- return false
- }
- return cgran != g
-}
-
-// prepareForMetaEmit performs preparatory steps needed prior to
-// emitting a meta-data file, notably computing a final hash of
-// all meta-data blobs and capturing os args.
-func prepareForMetaEmit() ([]rtcov.CovMetaBlob, error) {
- // Ask the runtime for the list of coverage meta-data symbols.
- ml := getCovMetaList()
-
- // In the normal case (go build -o prog.exe ... ; ./prog.exe)
- // len(ml) will always be non-zero, but we check here since at
- // some point this function will be reachable via user-callable
- // APIs (for example, to write out coverage data from a server
- // program that doesn't ever call os.Exit).
- if len(ml) == 0 {
- return nil, nil
- }
-
- s := &emitState{
- metalist: ml,
- debug: os.Getenv("GOCOVERDEBUG") != "",
- }
-
- // Capture os.Args() now so as to avoid issues if args
- // are rewritten during program execution.
- capturedOsArgs = captureOsArgs()
-
- if s.debug {
- fmt.Fprintf(os.Stderr, "=+= GOCOVERDIR is %s\n", os.Getenv("GOCOVERDIR"))
- fmt.Fprintf(os.Stderr, "=+= contents of covmetalist:\n")
- for k, b := range ml {
- fmt.Fprintf(os.Stderr, "=+= slot: %d path: %s ", k, b.PkgPath)
- if b.PkgID != -1 {
- fmt.Fprintf(os.Stderr, " hcid: %d", b.PkgID)
- }
- fmt.Fprintf(os.Stderr, "\n")
- }
- pm := getCovPkgMap()
- fmt.Fprintf(os.Stderr, "=+= remap table:\n")
- for from, to := range pm {
- fmt.Fprintf(os.Stderr, "=+= from %d to %d\n",
- uint32(from), uint32(to))
- }
- }
-
- h := md5.New()
- tlen := uint64(unsafe.Sizeof(coverage.MetaFileHeader{}))
- for _, entry := range ml {
- if _, err := h.Write(entry.Hash[:]); err != nil {
- return nil, err
- }
- tlen += uint64(entry.Len)
- ecm := coverage.CounterMode(entry.CounterMode)
- if modeClash(ecm) {
- return nil, fmt.Errorf("coverage counter mode clash: package %s uses mode=%d, but package %s uses mode=%s\n", ml[0].PkgPath, cmode, entry.PkgPath, ecm)
- }
- ecg := coverage.CounterGranularity(entry.CounterGranularity)
- if granClash(ecg) {
- return nil, fmt.Errorf("coverage counter granularity clash: package %s uses gran=%d, but package %s uses gran=%s\n", ml[0].PkgPath, cgran, entry.PkgPath, ecg)
- }
- }
-
- // Hash mode and granularity as well.
- h.Write([]byte(cmode.String()))
- h.Write([]byte(cgran.String()))
-
- // Compute final digest.
- fh := h.Sum(nil)
- copy(finalHash[:], fh)
- finalHashComputed = true
- finalMetaLen = tlen
-
- return ml, nil
-}
-
-// emitMetaDataToDirectory emits the meta-data output file to the specified
-// directory, returning an error if something went wrong.
-func emitMetaDataToDirectory(outdir string, ml []rtcov.CovMetaBlob) error {
- ml, err := prepareForMetaEmit()
- if err != nil {
- return err
- }
- if len(ml) == 0 {
- return nil
- }
-
- metaDataEmitAttempted = true
-
- s := &emitState{
- metalist: ml,
- debug: os.Getenv("GOCOVERDEBUG") != "",
- outdir: outdir,
- }
-
- // Open output files.
- if err := s.openOutputFiles(finalHash, finalMetaLen, metaDataFile); err != nil {
- return err
- }
-
- // Emit meta-data file only if needed (may already be present).
- if s.needMetaDataFile() {
- if err := s.emitMetaDataFile(finalHash, finalMetaLen); err != nil {
- return err
- }
- }
- return nil
-}
-
-// emitCounterData emits the counter data output file for this coverage run.
-// This entry point is intended to be invoked by the runtime when an
-// instrumented program is terminating or calling os.Exit().
-func emitCounterData() {
- if goCoverDir == "" || !finalHashComputed || covProfileAlreadyEmitted {
- return
- }
- if err := emitCounterDataToDirectory(goCoverDir); err != nil {
- fmt.Fprintf(os.Stderr, "error: coverage counter data emit failed: %v\n", err)
- if os.Getenv("GOCOVERDEBUG") != "" {
- panic("counter-data write failure")
- }
- }
-}
-
-// emitCounterDataToDirectory emits the counter-data output file for this coverage run.
-func emitCounterDataToDirectory(outdir string) error {
- // Ask the runtime for the list of coverage counter symbols.
- cl := getCovCounterList()
- if len(cl) == 0 {
- // no work to do here.
- return nil
- }
-
- if !finalHashComputed {
- return fmt.Errorf("error: meta-data not available (binary not built with -cover?)")
- }
-
- // Ask the runtime for the package ID remap table.
- pm := getCovPkgMap()
- s := &emitState{
- counterlist: cl,
- pkgmap: pm,
- outdir: outdir,
- debug: os.Getenv("GOCOVERDEBUG") != "",
- }
-
- // Open output file.
- if err := s.openOutputFiles(finalHash, finalMetaLen, counterDataFile); err != nil {
- return err
- }
- if s.cf == nil {
- return fmt.Errorf("counter data output file open failed (no additional info")
- }
-
- // Emit counter data file.
- if err := s.emitCounterDataFile(finalHash, s.cf); err != nil {
- return err
- }
- if err := s.cf.Close(); err != nil {
- return fmt.Errorf("closing counter data file: %v", err)
- }
-
- // Counter file has now been closed. Rename the temp to the
- // final desired path.
- if err := os.Rename(s.cftmp, s.cfname); err != nil {
- return fmt.Errorf("writing %s: rename from %s failed: %v\n", s.cfname, s.cftmp, err)
- }
-
- return nil
-}
-
-// emitCounterDataToWriter emits counter data for this coverage run to an io.Writer.
-func (s *emitState) emitCounterDataToWriter(w io.Writer) error {
- if err := s.emitCounterDataFile(finalHash, w); err != nil {
- return err
- }
- return nil
-}
-
-// openMetaFile determines whether we need to emit a meta-data output
-// file, or whether we can reuse the existing file in the coverage out
-// dir. It updates mfname/mftmp/mf fields in 's', returning an error
-// if something went wrong. See the comment on the emitState type
-// definition above for more on how file opening is managed.
-func (s *emitState) openMetaFile(metaHash [16]byte, metaLen uint64) error {
-
- // Open meta-outfile for reading to see if it exists.
- fn := fmt.Sprintf("%s.%x", coverage.MetaFilePref, metaHash)
- s.mfname = filepath.Join(s.outdir, fn)
- fi, err := os.Stat(s.mfname)
- if err != nil || fi.Size() != int64(metaLen) {
- // We need a new meta-file.
- tname := "tmp." + fn + strconv.FormatInt(time.Now().UnixNano(), 10)
- s.mftmp = filepath.Join(s.outdir, tname)
- s.mf, err = os.Create(s.mftmp)
- if err != nil {
- return fmt.Errorf("creating meta-data file %s: %v", s.mftmp, err)
- }
- }
- return nil
-}
-
-// openCounterFile opens an output file for the counter data portion
-// of a test coverage run. It updates the 'cfname', 'cftmp', and 'cf'
-// fields in 's', returning an error if something went wrong.
-func (s *emitState) openCounterFile(metaHash [16]byte) error {
- processID := os.Getpid()
- fn := fmt.Sprintf(coverage.CounterFileTempl, coverage.CounterFilePref, metaHash, processID, time.Now().UnixNano())
- s.cfname = filepath.Join(s.outdir, fn)
- s.cftmp = filepath.Join(s.outdir, "tmp."+fn)
- var err error
- s.cf, err = os.Create(s.cftmp)
- if err != nil {
- return fmt.Errorf("creating counter data file %s: %v", s.cftmp, err)
- }
- return nil
-}
-
-// openOutputFiles opens output files in preparation for emitting
-// coverage data. In the case of the meta-data file, openOutputFiles
-// may determine that we can reuse an existing meta-data file in the
-// outdir, in which case it will leave the 'mf' field in the state
-// struct as nil. If a new meta-file is needed, the field 'mfname'
-// will be the final desired path of the meta file, 'mftmp' will be a
-// temporary file, and 'mf' will be an open os.File pointer for
-// 'mftmp'. The idea is that the client/caller will write content into
-// 'mf', close it, and then rename 'mftmp' to 'mfname'. This function
-// also opens the counter data output file, setting 'cf' and 'cfname'
-// in the state struct.
-func (s *emitState) openOutputFiles(metaHash [16]byte, metaLen uint64, which fileType) error {
- fi, err := os.Stat(s.outdir)
- if err != nil {
- return fmt.Errorf("output directory %q inaccessible (err: %v); no coverage data written", s.outdir, err)
- }
- if !fi.IsDir() {
- return fmt.Errorf("output directory %q not a directory; no coverage data written", s.outdir)
- }
-
- if (which & metaDataFile) != 0 {
- if err := s.openMetaFile(metaHash, metaLen); err != nil {
- return err
- }
- }
- if (which & counterDataFile) != 0 {
- if err := s.openCounterFile(metaHash); err != nil {
- return err
- }
- }
- return nil
-}
-
-// emitMetaDataFile emits coverage meta-data to a previously opened
-// temporary file (s.mftmp), then renames the generated file to the
-// final path (s.mfname).
-func (s *emitState) emitMetaDataFile(finalHash [16]byte, tlen uint64) error {
- if err := writeMetaData(s.mf, s.metalist, cmode, cgran, finalHash); err != nil {
- return fmt.Errorf("writing %s: %v\n", s.mftmp, err)
- }
- if err := s.mf.Close(); err != nil {
- return fmt.Errorf("closing meta data temp file: %v", err)
- }
-
- // Temp file has now been flushed and closed. Rename the temp to the
- // final desired path.
- if err := os.Rename(s.mftmp, s.mfname); err != nil {
- return fmt.Errorf("writing %s: rename from %s failed: %v\n", s.mfname, s.mftmp, err)
- }
-
- return nil
-}
-
-// needMetaDataFile returns TRUE if we need to emit a meta-data file
-// for this program run. It should be used only after
-// openOutputFiles() has been invoked.
-func (s *emitState) needMetaDataFile() bool {
- return s.mf != nil
-}
-
-func writeMetaData(w io.Writer, metalist []rtcov.CovMetaBlob, cmode coverage.CounterMode, gran coverage.CounterGranularity, finalHash [16]byte) error {
- mfw := encodemeta.NewCoverageMetaFileWriter("<io.Writer>", w)
-
- var blobs [][]byte
- for _, e := range metalist {
- sd := unsafe.Slice(e.P, int(e.Len))
- blobs = append(blobs, sd)
- }
- return mfw.Write(finalHash, blobs, cmode, gran)
-}
-
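-// VisitFuncs walks the in-memory counter segments registered with the
-// runtime and invokes the callback 'f' once for each function that has
-// at least one live (non-zero) counter. It is invoked (indirectly) by
-// the counter-data writer during counter file encoding.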
-func (s *emitState) VisitFuncs(f encodecounter.CounterVisitorFn) error {
- var tcounters []uint32
-
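- // rdCounters snapshots a slice of atomic counters into a plain
- // []uint32, reusing the caller-provided backing slice to avoid
- // reallocating for every function visited.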
- rdCounters := func(actrs []atomic.Uint32, ctrs []uint32) []uint32 {
- ctrs = ctrs[:0]
- for i := range actrs {
- ctrs = append(ctrs, actrs[i].Load())
- }
- return ctrs
- }
-
- dpkg := uint32(0)
- for _, c := range s.counterlist {
- sd := unsafe.Slice((*atomic.Uint32)(unsafe.Pointer(c.Counters)), int(c.Len))
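- // Per the offsets defined in internal/coverage, each function's
- // region in the counter array begins with a header (number of
- // counters, package ID, function ID) followed by the counters
- // themselves; regions for functions that never ran remain zero.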
- for i := 0; i < len(sd); i++ {
- // Skip ahead until the next non-zero value.
- sdi := sd[i].Load()
- if sdi == 0 {
- continue
- }
-
- // We found a function that was executed.
- nCtrs := sd[i+coverage.NumCtrsOffset].Load()
- pkgId := sd[i+coverage.PkgIdOffset].Load()
- funcId := sd[i+coverage.FuncIdOffset].Load()
- cst := i + coverage.FirstCtrOffset
- counters := sd[cst : cst+int(nCtrs)]
-
- // Check to make sure that we have at least one live
- // counter. See the implementation note in ClearCoverageCounters
- // for a description of why this is needed.
- isLive := false
- for i := 0; i < len(counters); i++ {
- if counters[i].Load() != 0 {
- isLive = true
- break
- }
- }
- if !isLive {
- // Skip this function.
- i += coverage.FirstCtrOffset + int(nCtrs) - 1
- continue
- }
-
- if s.debug {
- if pkgId != dpkg {
- dpkg = pkgId
- fmt.Fprintf(os.Stderr, "\n=+= %d: pk=%d visit live fcn",
- i, pkgId)
- }
- fmt.Fprintf(os.Stderr, " {i=%d F%d NC%d}", i, funcId, nCtrs)
- }
-
- // Vet and/or fix up package ID. A package ID of zero
- // indicates that there is some new package X that is a
- // runtime dependency, and this package has code that
- // executes before its corresponding init package runs.
- // This is a fatal error that we should only see during
- // Go development (e.g. tip).
- ipk := int32(pkgId)
- if ipk == 0 {
- fmt.Fprintf(os.Stderr, "\n")
- reportErrorInHardcodedList(int32(i), ipk, funcId, nCtrs)
- } else if ipk < 0 {
- if newId, ok := s.pkgmap[int(ipk)]; ok {
- pkgId = uint32(newId)
- } else {
- fmt.Fprintf(os.Stderr, "\n")
- reportErrorInHardcodedList(int32(i), ipk, funcId, nCtrs)
- }
- } else {
- // The package ID value stored in the counter array
- // has 1 added to it (so as to preclude the
- // possibility of a zero value; see
- // runtime.addCovMeta), so subtract 1 here to form
- // the real package ID.
- pkgId--
- }
-
- tcounters = rdCounters(counters, tcounters)
- if err := f(pkgId, funcId, tcounters); err != nil {
- return err
- }
-
- // Skip over this function.
- i += coverage.FirstCtrOffset + int(nCtrs) - 1
- }
- if s.debug {
- fmt.Fprintf(os.Stderr, "\n")
- }
- }
- return nil
-}
-
-// captureOsArgs converts os.Args into the format we use to store
-// this info in the counter data file (the counter data file "args"
-// section is a generic key-value collection). See the 'args' section
-// in internal/coverage/defs.go for more info. The args map also
-// captures the GOOS and GOARCH values for the run.
-func captureOsArgs() map[string]string {
- m := make(map[string]string)
- m["argc"] = strconv.Itoa(len(os.Args))
- for k, a := range os.Args {
- m[fmt.Sprintf("argv%d", k)] = a
- }
- m["GOOS"] = runtime.GOOS
- m["GOARCH"] = runtime.GOARCH
- return m
-}
-
-// emitCounterDataFile emits the counter data portion of a
-// coverage output file (to the file 's.cf').
-func (s *emitState) emitCounterDataFile(finalHash [16]byte, w io.Writer) error {
- cfw := encodecounter.NewCoverageDataWriter(w, coverage.CtrULeb128)
- if err := cfw.Write(finalHash, capturedOsArgs, s); err != nil {
- return err
- }
- return nil
-}
-
-// markProfileEmitted signals the runtime/coverage machinery that
-// coverage data output files have already been written out, and there
-// is no need to take any additional action at exit time. This
-// function is called (via linknamed reference) from the
-// coverage-related boilerplate code in _testmain.go emitted for go
-// unit tests.
-func markProfileEmitted(val bool) {
- covProfileAlreadyEmitted = val
-}
-
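-// reportErrorInHardcodedList prints diagnostics when a counter region
-// carries a package ID that cannot be resolved via the hard-coded
-// runtime package ID list or the remap table (see the fixup logic in
-// VisitFuncs); it usually means internal/coverage/pkid.go needs updating.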
-func reportErrorInHardcodedList(slot, pkgID int32, fnID, nCtrs uint32) {
- metaList := getCovMetaList()
- pkgMap := getCovPkgMap()
-
- println("internal error in coverage meta-data tracking:")
- println("encountered bad pkgID:", pkgID, " at slot:", slot,
- " fnID:", fnID, " numCtrs:", nCtrs)
- println("list of hard-coded runtime package IDs needs revising.")
- println("[see the comment on the 'rtPkgs' var in ")
- println(" <goroot>/src/internal/coverage/pkid.go]")
- println("registered list:")
- for k, b := range metaList {
- print("slot: ", k, " path='", b.PkgPath, "' ")
- if b.PkgID != -1 {
- print(" hard-coded id: ", b.PkgID)
- }
- println("")
- }
- println("remap table:")
- for from, to := range pkgMap {
- println("from ", from, " to ", to)
- }
-}
diff --git a/src/runtime/coverage/emitdata_test.go b/src/runtime/coverage/emitdata_test.go
deleted file mode 100644
index 3558dd2d88..0000000000
--- a/src/runtime/coverage/emitdata_test.go
+++ /dev/null
@@ -1,550 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package coverage
-
-import (
- "fmt"
- "internal/coverage"
- "internal/goexperiment"
- "internal/platform"
- "internal/testenv"
- "os"
- "os/exec"
- "path/filepath"
- "runtime"
- "strings"
- "testing"
-)
-
-// Set to true for debugging (linux only).
-const fixedTestDir = false
-
-func TestCoverageApis(t *testing.T) {
- if testing.Short() {
- t.Skipf("skipping test: too long for short mode")
- }
- if !goexperiment.CoverageRedesign {
- t.Skipf("skipping new coverage tests (experiment not enabled)")
- }
- testenv.MustHaveGoBuild(t)
- dir := t.TempDir()
- if fixedTestDir {
- dir = "/tmp/qqqzzz"
- os.RemoveAll(dir)
- mkdir(t, dir)
- }
-
- // Build harness. We need two copies of the harness, one built
- // with -covermode=atomic and one built non-atomic.
- bdir1 := mkdir(t, filepath.Join(dir, "build1"))
- hargs1 := []string{"-covermode=atomic", "-coverpkg=all"}
- atomicHarnessPath := buildHarness(t, bdir1, hargs1)
- nonAtomicMode := testing.CoverMode()
- if testing.CoverMode() == "atomic" {
- nonAtomicMode = "set"
- }
- bdir2 := mkdir(t, filepath.Join(dir, "build2"))
- hargs2 := []string{"-coverpkg=all", "-covermode=" + nonAtomicMode}
- nonAtomicHarnessPath := buildHarness(t, bdir2, hargs2)
-
- t.Logf("atomic harness path is %s", atomicHarnessPath)
- t.Logf("non-atomic harness path is %s", nonAtomicHarnessPath)
-
- // Sub-tests for each API we want to inspect, plus
- // extras for error testing.
- t.Run("emitToDir", func(t *testing.T) {
- t.Parallel()
- testEmitToDir(t, atomicHarnessPath, dir)
- })
- t.Run("emitToWriter", func(t *testing.T) {
- t.Parallel()
- testEmitToWriter(t, atomicHarnessPath, dir)
- })
- t.Run("emitToNonexistentDir", func(t *testing.T) {
- t.Parallel()
- testEmitToNonexistentDir(t, atomicHarnessPath, dir)
- })
- t.Run("emitToNilWriter", func(t *testing.T) {
- t.Parallel()
- testEmitToNilWriter(t, atomicHarnessPath, dir)
- })
- t.Run("emitToFailingWriter", func(t *testing.T) {
- t.Parallel()
- testEmitToFailingWriter(t, atomicHarnessPath, dir)
- })
- t.Run("emitWithCounterClear", func(t *testing.T) {
- t.Parallel()
- testEmitWithCounterClear(t, atomicHarnessPath, dir)
- })
- t.Run("emitToDirNonAtomic", func(t *testing.T) {
- t.Parallel()
- testEmitToDirNonAtomic(t, nonAtomicHarnessPath, nonAtomicMode, dir)
- })
- t.Run("emitToWriterNonAtomic", func(t *testing.T) {
- t.Parallel()
- testEmitToWriterNonAtomic(t, nonAtomicHarnessPath, nonAtomicMode, dir)
- })
- t.Run("emitWithCounterClearNonAtomic", func(t *testing.T) {
- t.Parallel()
- testEmitWithCounterClearNonAtomic(t, nonAtomicHarnessPath, nonAtomicMode, dir)
- })
-}
-
-// upmergeCoverData helps improve coverage data for this package
-// itself. If this test itself is being invoked with "-cover", then
-// what we'd like is for package coverage data (that is, coverage for
-// routines in "runtime/coverage") to be incorporated into the test
-// run from the "harness.exe" runs we've just done. We can accomplish
- this by doing a merge from the harness gocoverdirs to the test
-// gocoverdir.
-func upmergeCoverData(t *testing.T, gocoverdir string, mode string) {
- if testing.CoverMode() != mode {
- return
- }
- testGoCoverDir := os.Getenv("GOCOVERDIR")
- if testGoCoverDir == "" {
- return
- }
- args := []string{"tool", "covdata", "merge", "-pkg=runtime/coverage",
- "-o", testGoCoverDir, "-i", gocoverdir}
- t.Logf("up-merge of covdata from %s to %s", gocoverdir, testGoCoverDir)
- t.Logf("executing: go %+v", args)
- cmd := exec.Command(testenv.GoToolPath(t), args...)
- if b, err := cmd.CombinedOutput(); err != nil {
- t.Fatalf("covdata merge failed (%v): %s", err, b)
- }
-}
-
-// buildHarness builds the helper program "harness.exe".
-func buildHarness(t *testing.T, dir string, opts []string) string {
- harnessPath := filepath.Join(dir, "harness.exe")
- harnessSrc := filepath.Join("testdata", "harness.go")
- args := []string{"build", "-o", harnessPath}
- args = append(args, opts...)
- args = append(args, harnessSrc)
- //t.Logf("harness build: go %+v\n", args)
- cmd := exec.Command(testenv.GoToolPath(t), args...)
- if b, err := cmd.CombinedOutput(); err != nil {
- t.Fatalf("build failed (%v): %s", err, b)
- }
- return harnessPath
-}
-
-func mkdir(t *testing.T, d string) string {
- t.Helper()
- if err := os.Mkdir(d, 0777); err != nil {
- t.Fatalf("mkdir failed: %v", err)
- }
- return d
-}
-
-// updateGoCoverDir updates the specified environment 'env' to set
-// GOCOVERDIR to 'gcd' (if setGoCoverDir is TRUE) or removes
-// GOCOVERDIR from the environment (if setGoCoverDir is false).
-func updateGoCoverDir(env []string, gcd string, setGoCoverDir bool) []string {
- rv := []string{}
- found := false
- for _, v := range env {
- if strings.HasPrefix(v, "GOCOVERDIR=") {
- if !setGoCoverDir {
- continue
- }
- v = "GOCOVERDIR=" + gcd
- found = true
- }
- rv = append(rv, v)
- }
- if !found && setGoCoverDir {
- rv = append(rv, "GOCOVERDIR="+gcd)
- }
- return rv
-}
-
-func runHarness(t *testing.T, harnessPath string, tp string, setGoCoverDir bool, rdir, edir string) (string, error) {
- t.Logf("running: %s -tp %s -o %s with rdir=%s and GOCOVERDIR=%v", harnessPath, tp, edir, rdir, setGoCoverDir)
- cmd := exec.Command(harnessPath, "-tp", tp, "-o", edir)
- cmd.Dir = rdir
- cmd.Env = updateGoCoverDir(os.Environ(), rdir, setGoCoverDir)
- b, err := cmd.CombinedOutput()
- //t.Logf("harness run output: %s\n", string(b))
- return string(b), err
-}
-
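-// testForSpecificFunctions runs "go tool covdata debugdump" on the
-// coverage data in 'dir' and verifies that the live-function dump
-// mentions every function in 'want' and none in 'avoid', returning a
-// non-empty description of any mismatches.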
-func testForSpecificFunctions(t *testing.T, dir string, want []string, avoid []string) string {
- args := []string{"tool", "covdata", "debugdump",
- "-live", "-pkg=command-line-arguments", "-i=" + dir}
- t.Logf("running: go %v\n", args)
- cmd := exec.Command(testenv.GoToolPath(t), args...)
- b, err := cmd.CombinedOutput()
- if err != nil {
- t.Fatalf("'go tool covdata failed (%v): %s", err, b)
- }
- output := string(b)
- rval := ""
- for _, f := range want {
- wf := "Func: " + f + "\n"
- if strings.Contains(output, wf) {
- continue
- }
- rval += fmt.Sprintf("error: output should contain %q but does not\n", wf)
- }
- for _, f := range avoid {
- wf := "Func: " + f + "\n"
- if strings.Contains(output, wf) {
- rval += fmt.Sprintf("error: output should not contain %q but does\n", wf)
- }
- }
- if rval != "" {
- t.Logf("=-= begin output:\n" + output + "\n=-= end output\n")
- }
- return rval
-}
-
-func withAndWithoutRunner(f func(setit bool, tag string)) {
- // Run 'f' with and without GOCOVERDIR set.
- for i := 0; i < 2; i++ {
- tag := "x"
- setGoCoverDir := true
- if i == 0 {
- setGoCoverDir = false
- tag = "y"
- }
- f(setGoCoverDir, tag)
- }
-}
-
-func mktestdirs(t *testing.T, tag, tp, dir string) (string, string) {
- t.Helper()
- rdir := mkdir(t, filepath.Join(dir, tp+"-rdir-"+tag))
- edir := mkdir(t, filepath.Join(dir, tp+"-edir-"+tag))
- return rdir, edir
-}
-
-func testEmitToDir(t *testing.T, harnessPath string, dir string) {
- withAndWithoutRunner(func(setGoCoverDir bool, tag string) {
- tp := "emitToDir"
- rdir, edir := mktestdirs(t, tag, tp, dir)
- output, err := runHarness(t, harnessPath, tp,
- setGoCoverDir, rdir, edir)
- if err != nil {
- t.Logf("%s", output)
- t.Fatalf("running 'harness -tp emitDir': %v", err)
- }
-
- // Just check to make sure meta-data file and counter data file were
- // written. Another alternative would be to run "go tool covdata"
- // or equivalent, but for now, this is what we've got.
- dents, err := os.ReadDir(edir)
- if err != nil {
- t.Fatalf("os.ReadDir(%s) failed: %v", edir, err)
- }
- mfc := 0
- cdc := 0
- for _, e := range dents {
- if e.IsDir() {
- continue
- }
- if strings.HasPrefix(e.Name(), coverage.MetaFilePref) {
- mfc++
- } else if strings.HasPrefix(e.Name(), coverage.CounterFilePref) {
- cdc++
- }
- }
- wantmf := 1
- wantcf := 1
- if mfc != wantmf {
- t.Errorf("EmitToDir: want %d meta-data files, got %d\n", wantmf, mfc)
- }
- if cdc != wantcf {
- t.Errorf("EmitToDir: want %d counter-data files, got %d\n", wantcf, cdc)
- }
- upmergeCoverData(t, edir, "atomic")
- upmergeCoverData(t, rdir, "atomic")
- })
-}
-
-func testEmitToWriter(t *testing.T, harnessPath string, dir string) {
- withAndWithoutRunner(func(setGoCoverDir bool, tag string) {
- tp := "emitToWriter"
- rdir, edir := mktestdirs(t, tag, tp, dir)
- output, err := runHarness(t, harnessPath, tp, setGoCoverDir, rdir, edir)
- if err != nil {
- t.Logf("%s", output)
- t.Fatalf("running 'harness -tp %s': %v", tp, err)
- }
- want := []string{"main", tp}
- avoid := []string{"final"}
- if msg := testForSpecificFunctions(t, edir, want, avoid); msg != "" {
- t.Errorf("coverage data from %q output match failed: %s", tp, msg)
- }
- upmergeCoverData(t, edir, "atomic")
- upmergeCoverData(t, rdir, "atomic")
- })
-}
-
-func testEmitToNonexistentDir(t *testing.T, harnessPath string, dir string) {
- withAndWithoutRunner(func(setGoCoverDir bool, tag string) {
- tp := "emitToNonexistentDir"
- rdir, edir := mktestdirs(t, tag, tp, dir)
- output, err := runHarness(t, harnessPath, tp, setGoCoverDir, rdir, edir)
- if err != nil {
- t.Logf("%s", output)
- t.Fatalf("running 'harness -tp %s': %v", tp, err)
- }
- upmergeCoverData(t, edir, "atomic")
- upmergeCoverData(t, rdir, "atomic")
- })
-}
-
-func testEmitToUnwritableDir(t *testing.T, harnessPath string, dir string) {
- withAndWithoutRunner(func(setGoCoverDir bool, tag string) {
-
- tp := "emitToUnwritableDir"
- rdir, edir := mktestdirs(t, tag, tp, dir)
-
- // Make edir unwritable.
- if err := os.Chmod(edir, 0555); err != nil {
- t.Fatalf("chmod failed: %v", err)
- }
- defer os.Chmod(edir, 0777)
-
- output, err := runHarness(t, harnessPath, tp, setGoCoverDir, rdir, edir)
- if err != nil {
- t.Logf("%s", output)
- t.Fatalf("running 'harness -tp %s': %v", tp, err)
- }
- upmergeCoverData(t, edir, "atomic")
- upmergeCoverData(t, rdir, "atomic")
- })
-}
-
-func testEmitToNilWriter(t *testing.T, harnessPath string, dir string) {
- withAndWithoutRunner(func(setGoCoverDir bool, tag string) {
- tp := "emitToNilWriter"
- rdir, edir := mktestdirs(t, tag, tp, dir)
- output, err := runHarness(t, harnessPath, tp, setGoCoverDir, rdir, edir)
- if err != nil {
- t.Logf("%s", output)
- t.Fatalf("running 'harness -tp %s': %v", tp, err)
- }
- upmergeCoverData(t, edir, "atomic")
- upmergeCoverData(t, rdir, "atomic")
- })
-}
-
-func testEmitToFailingWriter(t *testing.T, harnessPath string, dir string) {
- withAndWithoutRunner(func(setGoCoverDir bool, tag string) {
- tp := "emitToFailingWriter"
- rdir, edir := mktestdirs(t, tag, tp, dir)
- output, err := runHarness(t, harnessPath, tp, setGoCoverDir, rdir, edir)
- if err != nil {
- t.Logf("%s", output)
- t.Fatalf("running 'harness -tp %s': %v", tp, err)
- }
- upmergeCoverData(t, edir, "atomic")
- upmergeCoverData(t, rdir, "atomic")
- })
-}
-
-func testEmitWithCounterClear(t *testing.T, harnessPath string, dir string) {
- withAndWithoutRunner(func(setGoCoverDir bool, tag string) {
- tp := "emitWithCounterClear"
- rdir, edir := mktestdirs(t, tag, tp, dir)
- output, err := runHarness(t, harnessPath, tp,
- setGoCoverDir, rdir, edir)
- if err != nil {
- t.Logf("%s", output)
- t.Fatalf("running 'harness -tp %s': %v", tp, err)
- }
- want := []string{tp, "postClear"}
- avoid := []string{"preClear", "main", "final"}
- if msg := testForSpecificFunctions(t, edir, want, avoid); msg != "" {
- t.Logf("%s", output)
- t.Errorf("coverage data from %q output match failed: %s", tp, msg)
- }
- upmergeCoverData(t, edir, "atomic")
- upmergeCoverData(t, rdir, "atomic")
- })
-}
-
-func testEmitToDirNonAtomic(t *testing.T, harnessPath string, naMode string, dir string) {
- tp := "emitToDir"
- tag := "nonatomdir"
- rdir, edir := mktestdirs(t, tag, tp, dir)
- output, err := runHarness(t, harnessPath, tp,
- true, rdir, edir)
-
- // We expect an error here.
- if err == nil {
- t.Logf("%s", output)
- t.Fatalf("running 'harness -tp %s': did not get expected error", tp)
- }
-
- got := strings.TrimSpace(string(output))
- want := "WriteCountersDir invoked for program built"
- if !strings.Contains(got, want) {
- t.Errorf("running 'harness -tp %s': got:\n%s\nwant: %s",
- tp, got, want)
- }
- upmergeCoverData(t, edir, naMode)
- upmergeCoverData(t, rdir, naMode)
-}
-
-func testEmitToWriterNonAtomic(t *testing.T, harnessPath string, naMode string, dir string) {
- tp := "emitToWriter"
- tag := "nonatomw"
- rdir, edir := mktestdirs(t, tag, tp, dir)
- output, err := runHarness(t, harnessPath, tp,
- true, rdir, edir)
-
- // We expect an error here.
- if err == nil {
- t.Logf("%s", output)
- t.Fatalf("running 'harness -tp %s': did not get expected error", tp)
- }
-
- got := strings.TrimSpace(string(output))
- want := "WriteCounters invoked for program built"
- if !strings.Contains(got, want) {
- t.Errorf("running 'harness -tp %s': got:\n%s\nwant: %s",
- tp, got, want)
- }
-
- upmergeCoverData(t, edir, naMode)
- upmergeCoverData(t, rdir, naMode)
-}
-
-func testEmitWithCounterClearNonAtomic(t *testing.T, harnessPath string, naMode string, dir string) {
- tp := "emitWithCounterClear"
- tag := "cclear"
- rdir, edir := mktestdirs(t, tag, tp, dir)
- output, err := runHarness(t, harnessPath, tp,
- true, rdir, edir)
-
- // We expect an error here.
- if err == nil {
- t.Logf("%s", output)
- t.Fatalf("running 'harness -tp %s' nonatomic: did not get expected error", tp)
- }
-
- got := strings.TrimSpace(string(output))
- want := "ClearCounters invoked for program built"
- if !strings.Contains(got, want) {
- t.Errorf("running 'harness -tp %s': got:\n%s\nwant: %s",
- tp, got, want)
- }
-
- upmergeCoverData(t, edir, naMode)
- upmergeCoverData(t, rdir, naMode)
-}
-
-func TestApisOnNocoverBinary(t *testing.T) {
- if testing.Short() {
- t.Skipf("skipping test: too long for short mode")
- }
- testenv.MustHaveGoBuild(t)
- dir := t.TempDir()
-
- // Build harness with no -cover.
- bdir := mkdir(t, filepath.Join(dir, "nocover"))
- edir := mkdir(t, filepath.Join(dir, "emitDirNo"))
- harnessPath := buildHarness(t, bdir, nil)
- output, err := runHarness(t, harnessPath, "emitToDir", false, edir, edir)
- if err == nil {
- t.Fatalf("expected error on TestApisOnNocoverBinary harness run")
- }
- const want = "not built with -cover"
- if !strings.Contains(output, want) {
- t.Errorf("error output does not contain %q: %s", want, output)
- }
-}
-
-func TestIssue56006EmitDataRaceCoverRunningGoroutine(t *testing.T) {
- if testing.Short() {
- t.Skipf("skipping test: too long for short mode")
- }
- if !goexperiment.CoverageRedesign {
- t.Skipf("skipping new coverage tests (experiment not enabled)")
- }
-
- // This test requires "go test -race -cover", meaning that we need
- // go build, go run, and "-race" support.
- testenv.MustHaveGoRun(t)
- if !platform.RaceDetectorSupported(runtime.GOOS, runtime.GOARCH) ||
- !testenv.HasCGO() {
- t.Skip("skipped due to lack of race detector support / CGO")
- }
-
- // This will run a program with -cover and -race where we have a
- // goroutine still running (and updating counters) at the point where
- // the test runtime is trying to write out counter data.
- cmd := exec.Command(testenv.GoToolPath(t), "test", "-cover", "-race")
- cmd.Dir = filepath.Join("testdata", "issue56006")
- b, err := cmd.CombinedOutput()
- if err != nil {
- t.Fatalf("go test -cover -race failed: %v", err)
- }
-
- // Don't want to see any data races in output.
- avoid := []string{"DATA RACE"}
- for _, no := range avoid {
- if strings.Contains(string(b), no) {
- t.Logf("%s\n", string(b))
- t.Fatalf("found %s in test output, not permitted", no)
- }
- }
-}
-
-func TestIssue59563TruncatedCoverPkgAll(t *testing.T) {
- if testing.Short() {
- t.Skipf("skipping test: too long for short mode")
- }
- testenv.MustHaveGoRun(t)
-
- tmpdir := t.TempDir()
- ppath := filepath.Join(tmpdir, "foo.cov")
-
- cmd := exec.Command(testenv.GoToolPath(t), "test", "-coverpkg=all", "-coverprofile="+ppath)
- cmd.Dir = filepath.Join("testdata", "issue59563")
- b, err := cmd.CombinedOutput()
- if err != nil {
- t.Fatalf("go test -cover failed: %v", err)
- }
-
- cmd = exec.Command(testenv.GoToolPath(t), "tool", "cover", "-func="+ppath)
- b, err = cmd.CombinedOutput()
- if err != nil {
- t.Fatalf("go tool cover -func failed: %v", err)
- }
-
- lines := strings.Split(string(b), "\n")
- nfound := 0
- bad := false
- for _, line := range lines {
- f := strings.Fields(line)
- if len(f) == 0 {
- continue
- }
- // We're only interested in the specific function "large" for
- // the testcase being built. See issue #59563 for details on why
- // size matters.
- if !(strings.HasPrefix(f[0], "runtime/coverage/testdata/issue59563/repro.go") && strings.Contains(line, "large")) {
- continue
- }
- nfound++
- want := "100.0%"
- if f[len(f)-1] != want {
- t.Errorf("wanted %s got: %q\n", want, line)
- bad = true
- }
- }
- if nfound != 1 {
- t.Errorf("wanted 1 found, got %d\n", nfound)
- bad = true
- }
- if bad {
- t.Logf("func output:\n%s\n", string(b))
- }
-}
diff --git a/src/runtime/coverage/hooks.go b/src/runtime/coverage/hooks.go
deleted file mode 100644
index a9fbf9d7dd..0000000000
--- a/src/runtime/coverage/hooks.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package coverage
-
-import _ "unsafe"
-
-// initHook is invoked from the main package "init" routine in
-// programs built with "-cover". This function is intended to be
-// called only by the compiler.
-//
-// If 'istest' is false, it indicates we're building a regular program
-// ("go build -cover ..."), in which case we immediately try to write
-// out the meta-data file, and register emitCounterData as an exit
-// hook.
-//
-// If 'istest' is true (indicating that the program in question is a
-// Go test binary), then we tentatively queue up both emitMetaData and
-// emitCounterData as exit hooks. In the normal case (e.g. regular "go
-// test -cover" run) the testmain.go boilerplate will run at the end
-// of the test, write out the coverage percentage, and then invoke
-// markProfileEmitted() to indicate that no more work needs to be
-// done. If however that call is never made, this is a sign that the
-// test binary is being used as a replacement binary for the tool
-// being tested, hence we do want to run exit hooks when the program
-// terminates.
-func initHook(istest bool) {
- // Note: hooks are run in reverse registration order, so
- // register the counter data hook before the meta-data hook
- // (in the case where two hooks are needed).
- runOnNonZeroExit := true
- runtime_addExitHook(emitCounterData, runOnNonZeroExit)
- if istest {
- runtime_addExitHook(emitMetaData, runOnNonZeroExit)
- } else {
- emitMetaData()
- }
-}
-
-//go:linkname runtime_addExitHook runtime.addExitHook
-func runtime_addExitHook(f func(), runOnNonZeroExit bool)
diff --git a/src/runtime/coverage/testdata/harness.go b/src/runtime/coverage/testdata/harness.go
deleted file mode 100644
index 03969da426..0000000000
--- a/src/runtime/coverage/testdata/harness.go
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "flag"
- "fmt"
- "internal/coverage/slicewriter"
- "io"
- "log"
- "os"
- "path/filepath"
- "runtime/coverage"
- "strings"
-)
-
-var verbflag = flag.Int("v", 0, "Verbose trace output level")
-var testpointflag = flag.String("tp", "", "Testpoint to run")
-var outdirflag = flag.String("o", "", "Output dir into which to emit")
-
-func emitToWriter() {
- log.SetPrefix("emitToWriter: ")
- var slwm slicewriter.WriteSeeker
- if err := coverage.WriteMeta(&slwm); err != nil {
- log.Fatalf("error: WriteMeta returns %v", err)
- }
- mf := filepath.Join(*outdirflag, "covmeta.0abcdef")
- if err := os.WriteFile(mf, slwm.BytesWritten(), 0666); err != nil {
- log.Fatalf("error: writing %s: %v", mf, err)
- }
- var slwc slicewriter.WriteSeeker
- if err := coverage.WriteCounters(&slwc); err != nil {
- log.Fatalf("error: WriteCounters returns %v", err)
- }
- cf := filepath.Join(*outdirflag, "covcounters.0abcdef.99.77")
- if err := os.WriteFile(cf, slwc.BytesWritten(), 0666); err != nil {
- log.Fatalf("error: writing %s: %v", cf, err)
- }
-}
-
-func emitToDir() {
- log.SetPrefix("emitToDir: ")
- if err := coverage.WriteMetaDir(*outdirflag); err != nil {
- log.Fatalf("error: WriteMetaDir returns %v", err)
- }
- if err := coverage.WriteCountersDir(*outdirflag); err != nil {
- log.Fatalf("error: WriteCountersDir returns %v", err)
- }
-}
-
-func emitToNonexistentDir() {
- log.SetPrefix("emitToNonexistentDir: ")
-
- want := []string{
- "no such file or directory", // linux-ish
- "system cannot find the file specified", // windows
- "does not exist", // plan9
- }
-
- checkWant := func(which string, got string) {
- found := false
- for _, w := range want {
- if strings.Contains(got, w) {
- found = true
- break
- }
- }
- if !found {
- log.Fatalf("%s emit to bad dir: got error:\n %v\nwanted error with one of:\n %+v", which, got, want)
- }
- }
-
- // Mangle the output directory to produce something nonexistent.
- mangled := *outdirflag + "_MANGLED"
- if err := coverage.WriteMetaDir(mangled); err == nil {
- log.Fatal("expected error from WriteMetaDir to nonexistent dir")
- } else {
- got := fmt.Sprintf("%v", err)
- checkWant("meta data", got)
- }
-
- // Now try to emit counter data file to a bad dir.
- if err := coverage.WriteCountersDir(mangled); err == nil {
- log.Fatal("expected error emitting counter data to bad dir")
- } else {
- got := fmt.Sprintf("%v", err)
- checkWant("counter data", got)
- }
-}
-
-func emitToUnwritableDir() {
- log.SetPrefix("emitToUnwritableDir: ")
-
- want := "permission denied"
-
- if err := coverage.WriteMetaDir(*outdirflag); err == nil {
- log.Fatal("expected error from WriteMetaDir to unwritable dir")
- } else {
- got := fmt.Sprintf("%v", err)
- if !strings.Contains(got, want) {
- log.Fatalf("meta-data emit to unwritable dir: wanted error containing %q got %q", want, got)
- }
- }
-
- // Similarly with writing counter data.
- if err := coverage.WriteCountersDir(*outdirflag); err == nil {
- log.Fatal("expected error emitting counter data to unwritable dir")
- } else {
- got := fmt.Sprintf("%v", err)
- if !strings.Contains(got, want) {
- log.Fatalf("emitting counter data to unwritable dir: wanted error containing %q got %q", want, got)
- }
- }
-}
-
-func emitToNilWriter() {
- log.SetPrefix("emitToWriter: ")
- want := "nil writer"
- var bad io.WriteSeeker
- if err := coverage.WriteMeta(bad); err == nil {
- log.Fatal("expected error passing nil writer for meta emit")
- } else {
- got := fmt.Sprintf("%v", err)
- if !strings.Contains(got, want) {
- log.Fatalf("emitting meta-data passing nil writer: wanted error containing %q got %q", want, got)
- }
- }
-
- if err := coverage.WriteCounters(bad); err == nil {
- log.Fatal("expected error passing nil writer for counter emit")
- } else {
- got := fmt.Sprintf("%v", err)
- if !strings.Contains(got, want) {
- log.Fatalf("emitting counter data passing nil writer: wanted error containing %q got %q", want, got)
- }
- }
-}
-
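-// failingWriter wraps a slicewriter.WriteSeeker and begins returning
-// manufactured write errors once 'writeLimit' writes have succeeded
-// (a negative limit means writes never fail), so that error-handling
-// paths in the emit code can be exercised deterministically.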
-type failingWriter struct {
- writeCount int
- writeLimit int
- slws slicewriter.WriteSeeker
-}
-
-func (f *failingWriter) Write(p []byte) (n int, err error) {
- c := f.writeCount
- f.writeCount++
- if f.writeLimit < 0 || c < f.writeLimit {
- return f.slws.Write(p)
- }
- return 0, fmt.Errorf("manufactured write error")
-}
-
-func (f *failingWriter) Seek(offset int64, whence int) (int64, error) {
- return f.slws.Seek(offset, whence)
-}
-
-func (f *failingWriter) reset(lim int) {
- f.writeCount = 0
- f.writeLimit = lim
- f.slws = slicewriter.WriteSeeker{}
-}
-
-func writeStressTest(tag string, testf func(f *failingWriter) error) {
- // Invoke the function initially without the write limit
- // set, to capture the number of writes performed.
- fw := &failingWriter{writeLimit: -1}
- testf(fw)
-
- // Now that we know how many writes are going to happen, run the
- // function repeatedly, each time with a Write operation set to
- // fail at a new spot. The goal here is to make sure that:
- // A) an error is reported, and B) nothing crashes.
- tot := fw.writeCount
- for i := 0; i < tot; i++ {
- fw.reset(i)
- err := testf(fw)
- if err == nil {
- log.Fatalf("no error from write %d tag %s", i, tag)
- }
- }
-}
-
-func postClear() int {
- return 42
-}
-
-func preClear() int {
- return 42
-}
-
-// This test is designed to ensure that write errors are properly
-// handled by the code that writes out coverage data. It repeatedly
-// invokes the 'emit to writer' apis using a specially crafted writer
-// that captures the total number of expected writes, then replays the
-// execution N times with a manufactured write error at the
-// appropriate spot.
-func emitToFailingWriter() {
- log.SetPrefix("emitToFailingWriter: ")
-
- writeStressTest("emit-meta", func(f *failingWriter) error {
- return coverage.WriteMeta(f)
- })
- writeStressTest("emit-counter", func(f *failingWriter) error {
- return coverage.WriteCounters(f)
- })
-}
-
-func emitWithCounterClear() {
- log.SetPrefix("emitWitCounterClear: ")
- preClear()
- if err := coverage.ClearCounters(); err != nil {
- log.Fatalf("clear failed: %v", err)
- }
- postClear()
- if err := coverage.WriteMetaDir(*outdirflag); err != nil {
- log.Fatalf("error: WriteMetaDir returns %v", err)
- }
- if err := coverage.WriteCountersDir(*outdirflag); err != nil {
- log.Fatalf("error: WriteCountersDir returns %v", err)
- }
-}
-
-func final() int {
- println("I run last.")
- return 43
-}
-
-func main() {
- log.SetFlags(0)
- flag.Parse()
- if *testpointflag == "" {
- log.Fatalf("error: no testpoint (use -tp flag)")
- }
- if *outdirflag == "" {
- log.Fatalf("error: no output dir specified (use -o flag)")
- }
- switch *testpointflag {
- case "emitToDir":
- emitToDir()
- case "emitToWriter":
- emitToWriter()
- case "emitToNonexistentDir":
- emitToNonexistentDir()
- case "emitToUnwritableDir":
- emitToUnwritableDir()
- case "emitToNilWriter":
- emitToNilWriter()
- case "emitToFailingWriter":
- emitToFailingWriter()
- case "emitWithCounterClear":
- emitWithCounterClear()
- default:
- log.Fatalf("error: unknown testpoint %q", *testpointflag)
- }
- final()
-}
diff --git a/src/runtime/coverage/testdata/issue56006/repro.go b/src/runtime/coverage/testdata/issue56006/repro.go
deleted file mode 100644
index 60a4925143..0000000000
--- a/src/runtime/coverage/testdata/issue56006/repro.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package main
-
-//go:noinline
-func blah(x int) int {
- if x != 0 {
- return x + 42
- }
- return x - 42
-}
-
-func main() {
- go infloop()
- println(blah(1) + blah(0))
-}
-
-var G int
-
-func infloop() {
- for {
- G += blah(1)
- G += blah(0)
- if G > 10000 {
- G = 0
- }
- }
-}
diff --git a/src/runtime/coverage/testdata/issue56006/repro_test.go b/src/runtime/coverage/testdata/issue56006/repro_test.go
deleted file mode 100644
index 674d819c3b..0000000000
--- a/src/runtime/coverage/testdata/issue56006/repro_test.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package main
-
-import "testing"
-
-func TestSomething(t *testing.T) {
- go infloop()
- println(blah(1) + blah(0))
-}
diff --git a/src/runtime/coverage/testdata/issue59563/repro.go b/src/runtime/coverage/testdata/issue59563/repro.go
deleted file mode 100644
index d054567dc5..0000000000
--- a/src/runtime/coverage/testdata/issue59563/repro.go
+++ /dev/null
@@ -1,823 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package repro
-
-import (
- "fmt"
- "net/http"
-)
-
-func small() {
- go func() {
- fmt.Println(http.ListenAndServe("localhost:7070", nil))
- }()
-}
-
-func large(x int) int {
- if x == 0 {
- x += 0
- } else if x == 1 {
- x += 1
- } else if x == 2 {
- x += 2
- } else if x == 3 {
- x += 3
- } else if x == 4 {
- x += 4
- } else if x == 5 {
- x += 5
- } else if x == 6 {
- x += 6
- } else if x == 7 {
- x += 7
- } else if x == 8 {
- x += 8
- } else if x == 9 {
- x += 9
- } else if x == 10 {
- x += 10
- } else if x == 11 {
- x += 11
- } else if x == 12 {
- x += 12
- } else if x == 13 {
- x += 13
- } else if x == 14 {
- x += 14
- } else if x == 15 {
- x += 15
- } else if x == 16 {
- x += 16
- } else if x == 17 {
- x += 17
- } else if x == 18 {
- x += 18
- } else if x == 19 {
- x += 19
- } else if x == 20 {
- x += 20
- } else if x == 21 {
- x += 21
- } else if x == 22 {
- x += 22
- } else if x == 23 {
- x += 23
- } else if x == 24 {
- x += 24
- } else if x == 25 {
- x += 25
- } else if x == 26 {
- x += 26
- } else if x == 27 {
- x += 27
- } else if x == 28 {
- x += 28
- } else if x == 29 {
- x += 29
- } else if x == 30 {
- x += 30
- } else if x == 31 {
- x += 31
- } else if x == 32 {
- x += 32
- } else if x == 33 {
- x += 33
- } else if x == 34 {
- x += 34
- } else if x == 35 {
- x += 35
- } else if x == 36 {
- x += 36
- } else if x == 37 {
- x += 37
- } else if x == 38 {
- x += 38
- } else if x == 39 {
- x += 39
- } else if x == 40 {
- x += 40
- } else if x == 41 {
- x += 41
- } else if x == 42 {
- x += 42
- } else if x == 43 {
- x += 43
- } else if x == 44 {
- x += 44
- } else if x == 45 {
- x += 45
- } else if x == 46 {
- x += 46
- } else if x == 47 {
- x += 47
- } else if x == 48 {
- x += 48
- } else if x == 49 {
- x += 49
- } else if x == 50 {
- x += 50
- } else if x == 51 {
- x += 51
- } else if x == 52 {
- x += 52
- } else if x == 53 {
- x += 53
- } else if x == 54 {
- x += 54
- } else if x == 55 {
- x += 55
- } else if x == 56 {
- x += 56
- } else if x == 57 {
- x += 57
- } else if x == 58 {
- x += 58
- } else if x == 59 {
- x += 59
- } else if x == 60 {
- x += 60
- } else if x == 61 {
- x += 61
- } else if x == 62 {
- x += 62
- } else if x == 63 {
- x += 63
- } else if x == 64 {
- x += 64
- } else if x == 65 {
- x += 65
- } else if x == 66 {
- x += 66
- } else if x == 67 {
- x += 67
- } else if x == 68 {
- x += 68
- } else if x == 69 {
- x += 69
- } else if x == 70 {
- x += 70
- } else if x == 71 {
- x += 71
- } else if x == 72 {
- x += 72
- } else if x == 73 {
- x += 73
- } else if x == 74 {
- x += 74
- } else if x == 75 {
- x += 75
- } else if x == 76 {
- x += 76
- } else if x == 77 {
- x += 77
- } else if x == 78 {
- x += 78
- } else if x == 79 {
- x += 79
- } else if x == 80 {
- x += 80
- } else if x == 81 {
- x += 81
- } else if x == 82 {
- x += 82
- } else if x == 83 {
- x += 83
- } else if x == 84 {
- x += 84
- } else if x == 85 {
- x += 85
- } else if x == 86 {
- x += 86
- } else if x == 87 {
- x += 87
- } else if x == 88 {
- x += 88
- } else if x == 89 {
- x += 89
- } else if x == 90 {
- x += 90
- } else if x == 91 {
- x += 91
- } else if x == 92 {
- x += 92
- } else if x == 93 {
- x += 93
- } else if x == 94 {
- x += 94
- } else if x == 95 {
- x += 95
- } else if x == 96 {
- x += 96
- } else if x == 97 {
- x += 97
- } else if x == 98 {
- x += 98
- } else if x == 99 {
- x += 99
- } else if x == 100 {
- x += 100
- } else if x == 101 {
- x += 101
- } else if x == 102 {
- x += 102
- } else if x == 103 {
- x += 103
- } else if x == 104 {
- x += 104
- } else if x == 105 {
- x += 105
- } else if x == 106 {
- x += 106
- } else if x == 107 {
- x += 107
- } else if x == 108 {
- x += 108
- } else if x == 109 {
- x += 109
- } else if x == 110 {
- x += 110
- } else if x == 111 {
- x += 111
- } else if x == 112 {
- x += 112
- } else if x == 113 {
- x += 113
- } else if x == 114 {
- x += 114
- } else if x == 115 {
- x += 115
- } else if x == 116 {
- x += 116
- } else if x == 117 {
- x += 117
- } else if x == 118 {
- x += 118
- } else if x == 119 {
- x += 119
- } else if x == 120 {
- x += 120
- } else if x == 121 {
- x += 121
- } else if x == 122 {
- x += 122
- } else if x == 123 {
- x += 123
- } else if x == 124 {
- x += 124
- } else if x == 125 {
- x += 125
- } else if x == 126 {
- x += 126
- } else if x == 127 {
- x += 127
- } else if x == 128 {
- x += 128
- } else if x == 129 {
- x += 129
- } else if x == 130 {
- x += 130
- } else if x == 131 {
- x += 131
- } else if x == 132 {
- x += 132
- } else if x == 133 {
- x += 133
- } else if x == 134 {
- x += 134
- } else if x == 135 {
- x += 135
- } else if x == 136 {
- x += 136
- } else if x == 137 {
- x += 137
- } else if x == 138 {
- x += 138
- } else if x == 139 {
- x += 139
- } else if x == 140 {
- x += 140
- } else if x == 141 {
- x += 141
- } else if x == 142 {
- x += 142
- } else if x == 143 {
- x += 143
- } else if x == 144 {
- x += 144
- } else if x == 145 {
- x += 145
- } else if x == 146 {
- x += 146
- } else if x == 147 {
- x += 147
- } else if x == 148 {
- x += 148
- } else if x == 149 {
- x += 149
- } else if x == 150 {
- x += 150
- } else if x == 151 {
- x += 151
- } else if x == 152 {
- x += 152
- } else if x == 153 {
- x += 153
- } else if x == 154 {
- x += 154
- } else if x == 155 {
- x += 155
- } else if x == 156 {
- x += 156
- } else if x == 157 {
- x += 157
- } else if x == 158 {
- x += 158
- } else if x == 159 {
- x += 159
- } else if x == 160 {
- x += 160
- } else if x == 161 {
- x += 161
- } else if x == 162 {
- x += 162
- } else if x == 163 {
- x += 163
- } else if x == 164 {
- x += 164
- } else if x == 165 {
- x += 165
- } else if x == 166 {
- x += 166
- } else if x == 167 {
- x += 167
- } else if x == 168 {
- x += 168
- } else if x == 169 {
- x += 169
- } else if x == 170 {
- x += 170
- } else if x == 171 {
- x += 171
- } else if x == 172 {
- x += 172
- } else if x == 173 {
- x += 173
- } else if x == 174 {
- x += 174
- } else if x == 175 {
- x += 175
- } else if x == 176 {
- x += 176
- } else if x == 177 {
- x += 177
- } else if x == 178 {
- x += 178
- } else if x == 179 {
- x += 179
- } else if x == 180 {
- x += 180
- } else if x == 181 {
- x += 181
- } else if x == 182 {
- x += 182
- } else if x == 183 {
- x += 183
- } else if x == 184 {
- x += 184
- } else if x == 185 {
- x += 185
- } else if x == 186 {
- x += 186
- } else if x == 187 {
- x += 187
- } else if x == 188 {
- x += 188
- } else if x == 189 {
- x += 189
- } else if x == 190 {
- x += 190
- } else if x == 191 {
- x += 191
- } else if x == 192 {
- x += 192
- } else if x == 193 {
- x += 193
- } else if x == 194 {
- x += 194
- } else if x == 195 {
- x += 195
- } else if x == 196 {
- x += 196
- } else if x == 197 {
- x += 197
- } else if x == 198 {
- x += 198
- } else if x == 199 {
- x += 199
- } else if x == 200 {
- x += 200
- } else if x == 201 {
- x += 201
- } else if x == 202 {
- x += 202
- } else if x == 203 {
- x += 203
- } else if x == 204 {
- x += 204
- } else if x == 205 {
- x += 205
- } else if x == 206 {
- x += 206
- } else if x == 207 {
- x += 207
- } else if x == 208 {
- x += 208
- } else if x == 209 {
- x += 209
- } else if x == 210 {
- x += 210
- } else if x == 211 {
- x += 211
- } else if x == 212 {
- x += 212
- } else if x == 213 {
- x += 213
- } else if x == 214 {
- x += 214
- } else if x == 215 {
- x += 215
- } else if x == 216 {
- x += 216
- } else if x == 217 {
- x += 217
- } else if x == 218 {
- x += 218
- } else if x == 219 {
- x += 219
- } else if x == 220 {
- x += 220
- } else if x == 221 {
- x += 221
- } else if x == 222 {
- x += 222
- } else if x == 223 {
- x += 223
- } else if x == 224 {
- x += 224
- } else if x == 225 {
- x += 225
- } else if x == 226 {
- x += 226
- } else if x == 227 {
- x += 227
- } else if x == 228 {
- x += 228
- } else if x == 229 {
- x += 229
- } else if x == 230 {
- x += 230
- } else if x == 231 {
- x += 231
- } else if x == 232 {
- x += 232
- } else if x == 233 {
- x += 233
- } else if x == 234 {
- x += 234
- } else if x == 235 {
- x += 235
- } else if x == 236 {
- x += 236
- } else if x == 237 {
- x += 237
- } else if x == 238 {
- x += 238
- } else if x == 239 {
- x += 239
- } else if x == 240 {
- x += 240
- } else if x == 241 {
- x += 241
- } else if x == 242 {
- x += 242
- } else if x == 243 {
- x += 243
- } else if x == 244 {
- x += 244
- } else if x == 245 {
- x += 245
- } else if x == 246 {
- x += 246
- } else if x == 247 {
- x += 247
- } else if x == 248 {
- x += 248
- } else if x == 249 {
- x += 249
- } else if x == 250 {
- x += 250
- } else if x == 251 {
- x += 251
- } else if x == 252 {
- x += 252
- } else if x == 253 {
- x += 253
- } else if x == 254 {
- x += 254
- } else if x == 255 {
- x += 255
- } else if x == 256 {
- x += 256
- } else if x == 257 {
- x += 257
- } else if x == 258 {
- x += 258
- } else if x == 259 {
- x += 259
- } else if x == 260 {
- x += 260
- } else if x == 261 {
- x += 261
- } else if x == 262 {
- x += 262
- } else if x == 263 {
- x += 263
- } else if x == 264 {
- x += 264
- } else if x == 265 {
- x += 265
- } else if x == 266 {
- x += 266
- } else if x == 267 {
- x += 267
- } else if x == 268 {
- x += 268
- } else if x == 269 {
- x += 269
- } else if x == 270 {
- x += 270
- } else if x == 271 {
- x += 271
- } else if x == 272 {
- x += 272
- } else if x == 273 {
- x += 273
- } else if x == 274 {
- x += 274
- } else if x == 275 {
- x += 275
- } else if x == 276 {
- x += 276
- } else if x == 277 {
- x += 277
- } else if x == 278 {
- x += 278
- } else if x == 279 {
- x += 279
- } else if x == 280 {
- x += 280
- } else if x == 281 {
- x += 281
- } else if x == 282 {
- x += 282
- } else if x == 283 {
- x += 283
- } else if x == 284 {
- x += 284
- } else if x == 285 {
- x += 285
- } else if x == 286 {
- x += 286
- } else if x == 287 {
- x += 287
- } else if x == 288 {
- x += 288
- } else if x == 289 {
- x += 289
- } else if x == 290 {
- x += 290
- } else if x == 291 {
- x += 291
- } else if x == 292 {
- x += 292
- } else if x == 293 {
- x += 293
- } else if x == 294 {
- x += 294
- } else if x == 295 {
- x += 295
- } else if x == 296 {
- x += 296
- } else if x == 297 {
- x += 297
- } else if x == 298 {
- x += 298
- } else if x == 299 {
- x += 299
- } else if x == 300 {
- x += 300
- } else if x == 301 {
- x += 301
- } else if x == 302 {
- x += 302
- } else if x == 303 {
- x += 303
- } else if x == 304 {
- x += 304
- } else if x == 305 {
- x += 305
- } else if x == 306 {
- x += 306
- } else if x == 307 {
- x += 307
- } else if x == 308 {
- x += 308
- } else if x == 309 {
- x += 309
- } else if x == 310 {
- x += 310
- } else if x == 311 {
- x += 311
- } else if x == 312 {
- x += 312
- } else if x == 313 {
- x += 313
- } else if x == 314 {
- x += 314
- } else if x == 315 {
- x += 315
- } else if x == 316 {
- x += 316
- } else if x == 317 {
- x += 317
- } else if x == 318 {
- x += 318
- } else if x == 319 {
- x += 319
- } else if x == 320 {
- x += 320
- } else if x == 321 {
- x += 321
- } else if x == 322 {
- x += 322
- } else if x == 323 {
- x += 323
- } else if x == 324 {
- x += 324
- } else if x == 325 {
- x += 325
- } else if x == 326 {
- x += 326
- } else if x == 327 {
- x += 327
- } else if x == 328 {
- x += 328
- } else if x == 329 {
- x += 329
- } else if x == 330 {
- x += 330
- } else if x == 331 {
- x += 331
- } else if x == 332 {
- x += 332
- } else if x == 333 {
- x += 333
- } else if x == 334 {
- x += 334
- } else if x == 335 {
- x += 335
- } else if x == 336 {
- x += 336
- } else if x == 337 {
- x += 337
- } else if x == 338 {
- x += 338
- } else if x == 339 {
- x += 339
- } else if x == 340 {
- x += 340
- } else if x == 341 {
- x += 341
- } else if x == 342 {
- x += 342
- } else if x == 343 {
- x += 343
- } else if x == 344 {
- x += 344
- } else if x == 345 {
- x += 345
- } else if x == 346 {
- x += 346
- } else if x == 347 {
- x += 347
- } else if x == 348 {
- x += 348
- } else if x == 349 {
- x += 349
- } else if x == 350 {
- x += 350
- } else if x == 351 {
- x += 351
- } else if x == 352 {
- x += 352
- } else if x == 353 {
- x += 353
- } else if x == 354 {
- x += 354
- } else if x == 355 {
- x += 355
- } else if x == 356 {
- x += 356
- } else if x == 357 {
- x += 357
- } else if x == 358 {
- x += 358
- } else if x == 359 {
- x += 359
- } else if x == 360 {
- x += 360
- } else if x == 361 {
- x += 361
- } else if x == 362 {
- x += 362
- } else if x == 363 {
- x += 363
- } else if x == 364 {
- x += 364
- } else if x == 365 {
- x += 365
- } else if x == 366 {
- x += 366
- } else if x == 367 {
- x += 367
- } else if x == 368 {
- x += 368
- } else if x == 369 {
- x += 369
- } else if x == 370 {
- x += 370
- } else if x == 371 {
- x += 371
- } else if x == 372 {
- x += 372
- } else if x == 373 {
- x += 373
- } else if x == 374 {
- x += 374
- } else if x == 375 {
- x += 375
- } else if x == 376 {
- x += 376
- } else if x == 377 {
- x += 377
- } else if x == 378 {
- x += 378
- } else if x == 379 {
- x += 379
- } else if x == 380 {
- x += 380
- } else if x == 381 {
- x += 381
- } else if x == 382 {
- x += 382
- } else if x == 383 {
- x += 383
- } else if x == 384 {
- x += 384
- } else if x == 385 {
- x += 385
- } else if x == 386 {
- x += 386
- } else if x == 387 {
- x += 387
- } else if x == 388 {
- x += 388
- } else if x == 389 {
- x += 389
- } else if x == 390 {
- x += 390
- } else if x == 391 {
- x += 391
- } else if x == 392 {
- x += 392
- } else if x == 393 {
- x += 393
- } else if x == 394 {
- x += 394
- } else if x == 395 {
- x += 395
- } else if x == 396 {
- x += 396
- } else if x == 397 {
- x += 397
- } else if x == 398 {
- x += 398
- } else if x == 399 {
- x += 399
- } else if x == 400 {
- x += 400
- }
- return x * x
-}
diff --git a/src/runtime/coverage/testdata/issue59563/repro_test.go b/src/runtime/coverage/testdata/issue59563/repro_test.go
deleted file mode 100644
index 15c8e01a28..0000000000
--- a/src/runtime/coverage/testdata/issue59563/repro_test.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package repro
-
-import "testing"
-
-func TestSomething(t *testing.T) {
- small()
- for i := 0; i < 1001; i++ {
- large(i)
- }
-}
diff --git a/src/runtime/coverage/testsupport.go b/src/runtime/coverage/testsupport.go
deleted file mode 100644
index 4b00f3a0f7..0000000000
--- a/src/runtime/coverage/testsupport.go
+++ /dev/null
@@ -1,323 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package coverage
-
-import (
- "encoding/json"
- "fmt"
- "internal/coverage"
- "internal/coverage/calloc"
- "internal/coverage/cformat"
- "internal/coverage/cmerge"
- "internal/coverage/decodecounter"
- "internal/coverage/decodemeta"
- "internal/coverage/pods"
- "internal/runtime/atomic"
- "io"
- "os"
- "path/filepath"
- "strings"
- "unsafe"
-)
-
-// processCoverTestDir is called (via a linknamed reference) from
-// testmain code when "go test -cover" is in effect. It is not
-// intended to be used other than internally by the Go command's
-// generated code.
-func processCoverTestDir(dir string, cfile string, cm string, cpkg string) error {
- return processCoverTestDirInternal(dir, cfile, cm, cpkg, os.Stdout)
-}
-
-// processCoverTestDirInternal is an io.Writer version of processCoverTestDir,
-// exposed for unit testing.
-func processCoverTestDirInternal(dir string, cfile string, cm string, cpkg string, w io.Writer) error {
- cmode := coverage.ParseCounterMode(cm)
- if cmode == coverage.CtrModeInvalid {
- return fmt.Errorf("invalid counter mode %q", cm)
- }
-
- // Emit meta-data and counter data.
- ml := getCovMetaList()
- if len(ml) == 0 {
- // This corresponds to the case where we have a package that
- // contains test code but no functions (which is fine). In this
- // case there is no need to emit anything.
- } else {
- if err := emitMetaDataToDirectory(dir, ml); err != nil {
- return err
- }
- if err := emitCounterDataToDirectory(dir); err != nil {
- return err
- }
- }
-
- // Collect pods from test run. For the majority of cases we would
- // expect to see a single pod here, but allow for multiple pods in
- // case the test harness is doing extra work to collect data files
- // from builds that it kicks off as part of the testing.
- podlist, err := pods.CollectPods([]string{dir}, false)
- if err != nil {
- return fmt.Errorf("reading from %s: %v", dir, err)
- }
-
- // Open text output file if appropriate.
- var tf *os.File
- var tfClosed bool
- if cfile != "" {
- var err error
- tf, err = os.Create(cfile)
- if err != nil {
- return fmt.Errorf("internal error: opening coverage data output file %q: %v", cfile, err)
- }
- defer func() {
- if !tfClosed {
- tfClosed = true
- tf.Close()
- }
- }()
- }
-
- // Read/process the pods.
- ts := &tstate{
- cm: &cmerge.Merger{},
- cf: cformat.NewFormatter(cmode),
- cmode: cmode,
- }
- // Generate the expected hash string based on the final meta-data
- // hash for this test, then look only for pods that refer to that
- // hash (just in case there are multiple instrumented executables
- // in play). See issue #57924 for more on this.
- hashstring := fmt.Sprintf("%x", finalHash)
- importpaths := make(map[string]struct{})
- for _, p := range podlist {
- if !strings.Contains(p.MetaFile, hashstring) {
- continue
- }
- if err := ts.processPod(p, importpaths); err != nil {
- return err
- }
- }
-
- metafilespath := filepath.Join(dir, coverage.MetaFilesFileName)
- if _, err := os.Stat(metafilespath); err == nil {
- if err := ts.readAuxMetaFiles(metafilespath, importpaths); err != nil {
- return err
- }
- }
-
- // Emit percent.
- if err := ts.cf.EmitPercent(w, cpkg, true, true); err != nil {
- return err
- }
-
- // Emit text output.
- if tf != nil {
- if err := ts.cf.EmitTextual(tf); err != nil {
- return err
- }
- tfClosed = true
- if err := tf.Close(); err != nil {
- return fmt.Errorf("closing %s: %v", cfile, err)
- }
- }
-
- return nil
-}
-
-type tstate struct {
- calloc.BatchCounterAlloc
- cm *cmerge.Merger
- cf *cformat.Formatter
- cmode coverage.CounterMode
-}
-
-// processPod reads coverage counter data for a specific pod.
-func (ts *tstate) processPod(p pods.Pod, importpaths map[string]struct{}) error {
- // Open meta-data file
- f, err := os.Open(p.MetaFile)
- if err != nil {
- return fmt.Errorf("unable to open meta-data file %s: %v", p.MetaFile, err)
- }
- defer func() {
- f.Close()
- }()
- var mfr *decodemeta.CoverageMetaFileReader
- mfr, err = decodemeta.NewCoverageMetaFileReader(f, nil)
- if err != nil {
- return fmt.Errorf("error reading meta-data file %s: %v", p.MetaFile, err)
- }
- newmode := mfr.CounterMode()
- if newmode != ts.cmode {
- return fmt.Errorf("internal error: counter mode clash: %q from test harness, %q from data file %s", ts.cmode.String(), newmode.String(), p.MetaFile)
- }
- newgran := mfr.CounterGranularity()
- if err := ts.cm.SetModeAndGranularity(p.MetaFile, cmode, newgran); err != nil {
- return err
- }
-
- // A map to store counter data, indexed by pkgid/fnid tuple.
- pmm := make(map[pkfunc][]uint32)
-
- // Helper to read a single counter data file.
- readcdf := func(cdf string) error {
- cf, err := os.Open(cdf)
- if err != nil {
- return fmt.Errorf("opening counter data file %s: %s", cdf, err)
- }
- defer cf.Close()
- var cdr *decodecounter.CounterDataReader
- cdr, err = decodecounter.NewCounterDataReader(cdf, cf)
- if err != nil {
- return fmt.Errorf("reading counter data file %s: %s", cdf, err)
- }
- var data decodecounter.FuncPayload
- for {
- ok, err := cdr.NextFunc(&data)
- if err != nil {
- return fmt.Errorf("reading counter data file %s: %v", cdf, err)
- }
- if !ok {
- break
- }
-
- // NB: sanity check on pkg and func IDs?
- key := pkfunc{pk: data.PkgIdx, fcn: data.FuncIdx}
- if prev, found := pmm[key]; found {
- // Note: no overflow reporting here.
- if err, _ := ts.cm.MergeCounters(data.Counters, prev); err != nil {
- return fmt.Errorf("processing counter data file %s: %v", cdf, err)
- }
- }
- c := ts.AllocateCounters(len(data.Counters))
- copy(c, data.Counters)
- pmm[key] = c
- }
- return nil
- }
-
- // Read counter data files.
- for _, cdf := range p.CounterDataFiles {
- if err := readcdf(cdf); err != nil {
- return err
- }
- }
-
- // Visit meta-data file.
- np := uint32(mfr.NumPackages())
- payload := []byte{}
- for pkIdx := uint32(0); pkIdx < np; pkIdx++ {
- var pd *decodemeta.CoverageMetaDataDecoder
- pd, payload, err = mfr.GetPackageDecoder(pkIdx, payload)
- if err != nil {
- return fmt.Errorf("reading pkg %d from meta-file %s: %s", pkIdx, p.MetaFile, err)
- }
- ts.cf.SetPackage(pd.PackagePath())
- importpaths[pd.PackagePath()] = struct{}{}
- var fd coverage.FuncDesc
- nf := pd.NumFuncs()
- for fnIdx := uint32(0); fnIdx < nf; fnIdx++ {
- if err := pd.ReadFunc(fnIdx, &fd); err != nil {
- return fmt.Errorf("reading meta-data file %s: %v",
- p.MetaFile, err)
- }
- key := pkfunc{pk: pkIdx, fcn: fnIdx}
- counters, haveCounters := pmm[key]
- for i := 0; i < len(fd.Units); i++ {
- u := fd.Units[i]
- // Skip units with non-zero parent (no way to represent
- // these in the existing format).
- if u.Parent != 0 {
- continue
- }
- count := uint32(0)
- if haveCounters {
- count = counters[i]
- }
- ts.cf.AddUnit(fd.Srcfile, fd.Funcname, fd.Lit, u, count)
- }
- }
- }
- return nil
-}
-
-type pkfunc struct {
- pk, fcn uint32
-}
-
-func (ts *tstate) readAuxMetaFiles(metafiles string, importpaths map[string]struct{}) error {
- // Unmarshal the information on available aux metafiles into
- // a MetaFileCollection struct.
- var mfc coverage.MetaFileCollection
- data, err := os.ReadFile(metafiles)
- if err != nil {
- return fmt.Errorf("error reading auxmetafiles file %q: %v", metafiles, err)
- }
- if err := json.Unmarshal(data, &mfc); err != nil {
- return fmt.Errorf("error reading auxmetafiles file %q: %v", metafiles, err)
- }
-
- // Walk through each available aux meta-file. If we've already
- // seen the package path in question during the walk of the
- // "regular" meta-data file, then we can skip the package,
- // otherwise construct a dummy pod with the single meta-data file
- // (no counters) and invoke processPod on it.
- for i := range mfc.ImportPaths {
- p := mfc.ImportPaths[i]
- if _, ok := importpaths[p]; ok {
- continue
- }
- var pod pods.Pod
- pod.MetaFile = mfc.MetaFileFragments[i]
- if err := ts.processPod(pod, importpaths); err != nil {
- return err
- }
- }
- return nil
-}
-
-// snapshot returns a snapshot of coverage percentage at a moment of
-// time within a running test, so as to support the testing.Coverage()
-// function. This version doesn't examine coverage meta-data, so the
-// result it returns will be less accurate (more "slop") due to the
-// fact that we don't look at the meta data to see how many statements
-// are associated with each counter.
-func snapshot() float64 {
- cl := getCovCounterList()
- if len(cl) == 0 {
- // no work to do here.
- return 0.0
- }
-
- tot := uint64(0)
- totExec := uint64(0)
- for _, c := range cl {
- sd := unsafe.Slice((*atomic.Uint32)(unsafe.Pointer(c.Counters)), c.Len)
- tot += uint64(len(sd))
- for i := 0; i < len(sd); i++ {
- // Skip ahead until the next non-zero value.
- if sd[i].Load() == 0 {
- continue
- }
- // We found a function that was executed.
- nCtrs := sd[i+coverage.NumCtrsOffset].Load()
- cst := i + coverage.FirstCtrOffset
-
- if cst+int(nCtrs) > len(sd) {
- break
- }
- counters := sd[cst : cst+int(nCtrs)]
- for i := range counters {
- if counters[i].Load() != 0 {
- totExec++
- }
- }
- i += coverage.FirstCtrOffset + int(nCtrs) - 1
- }
- }
- if tot == 0 {
- return 0.0
- }
- return float64(totExec) / float64(tot)
-}
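The deleted snapshot helper above estimates coverage as the number of non-zero counters divided by the total number of 32-bit words in the counter blobs, which is where the documented "slop" comes from. Below is a rough sketch of that arithmetic on ordinary slices (not part of the change itself; the three-word function header of count, package ID, and function ID is an assumption standing in for coverage.NumCtrsOffset and coverage.FirstCtrOffset):

	// roughSnapshot mirrors the ratio computed by the deleted snapshot().
	func roughSnapshot(blobs [][]uint32) float64 {
		var tot, exec uint64
		for _, sd := range blobs {
			tot += uint64(len(sd))
			for i := 0; i < len(sd); i++ {
				if sd[i] == 0 {
					continue // still scanning for the next function header
				}
				nCtrs := int(sd[i]) // counter count for this function
				first := i + 3      // assumed header size (FirstCtrOffset)
				if first+nCtrs > len(sd) {
					break // malformed tail; give up, as the original does
				}
				for _, c := range sd[first : first+nCtrs] {
					if c != 0 {
						exec++
					}
				}
				i = first + nCtrs - 1
			}
		}
		if tot == 0 {
			return 0
		}
		return float64(exec) / float64(tot)
	}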
diff --git a/src/runtime/coverage/ts_test.go b/src/runtime/coverage/ts_test.go
deleted file mode 100644
index b4c6e9716c..0000000000
--- a/src/runtime/coverage/ts_test.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package coverage
-
-import (
- "encoding/json"
- "internal/coverage"
- "internal/goexperiment"
- "internal/testenv"
- "os"
- "os/exec"
- "path/filepath"
- "strings"
- "testing"
- _ "unsafe"
-)
-
-//go:linkname testing_testGoCoverDir testing.testGoCoverDir
-func testing_testGoCoverDir() string
-
-func testGoCoverDir(t *testing.T) string {
- tgcd := testing_testGoCoverDir()
- if tgcd != "" {
- return tgcd
- }
- return t.TempDir()
-}
-
-// TestTestSupport does a basic verification of the functionality in
-// runtime/coverage.processCoverTestDir (doing this here as opposed to
-// relying on other test paths will provide a better signal when
-// running "go test -cover" for this package).
-func TestTestSupport(t *testing.T) {
- if !goexperiment.CoverageRedesign {
- return
- }
- if testing.CoverMode() == "" {
- return
- }
- tgcd := testGoCoverDir(t)
- t.Logf("testing.testGoCoverDir() returns %s mode=%s\n",
- tgcd, testing.CoverMode())
-
- textfile := filepath.Join(t.TempDir(), "file.txt")
- var sb strings.Builder
- err := processCoverTestDirInternal(tgcd, textfile,
- testing.CoverMode(), "", &sb)
- if err != nil {
- t.Fatalf("bad: %v", err)
- }
-
- // Check for existence of text file.
- if inf, err := os.Open(textfile); err != nil {
- t.Fatalf("problems opening text file %s: %v", textfile, err)
- } else {
- inf.Close()
- }
-
- // Check for percent output with expected tokens.
- strout := sb.String()
- want := "of statements"
- if !strings.Contains(strout, want) {
- t.Logf("output from run: %s\n", strout)
- t.Fatalf("percent output missing token: %q", want)
- }
-}
-
-var funcInvoked bool
-
-//go:noinline
-func thisFunctionOnlyCalledFromSnapshotTest(n int) int {
- if funcInvoked {
- panic("bad")
- }
- funcInvoked = true
-
- // Contents here not especially important, just so long as we
- // have some statements.
- t := 0
- for i := 0; i < n; i++ {
- for j := 0; j < i; j++ {
- t += i ^ j
- }
- }
- return t
-}
-
-// Tests runtime/coverage.snapshot() directly. Note that if
-// coverage is not enabled, the hook is designed to just return
-// zero.
-func TestCoverageSnapshot(t *testing.T) {
- C1 := snapshot()
- thisFunctionOnlyCalledFromSnapshotTest(15)
- C2 := snapshot()
- cond := "C1 > C2"
- val := C1 > C2
- if testing.CoverMode() != "" {
- cond = "C1 >= C2"
- val = C1 >= C2
- }
- t.Logf("%f %f\n", C1, C2)
- if val {
- t.Errorf("erroneous snapshots, %s = true C1=%f C2=%f",
- cond, C1, C2)
- }
-}
-
-const hellogo = `
-package main
-
-func main() {
- println("hello")
-}
-`
-
-// Returns a pair F,T where F is a meta-data file generated from
-// "hello.go" above, and T is a token to look for that should be
-// present in the coverage report from F.
-func genAuxMeta(t *testing.T, dstdir string) (string, string) {
- // Do a GOCOVERDIR=<tmp> go run hello.go
- src := filepath.Join(dstdir, "hello.go")
- if err := os.WriteFile(src, []byte(hellogo), 0777); err != nil {
- t.Fatalf("write failed: %v", err)
- }
- args := []string{"run", "-covermode=" + testing.CoverMode(), src}
- cmd := exec.Command(testenv.GoToolPath(t), args...)
- cmd.Env = updateGoCoverDir(os.Environ(), dstdir, true)
- if b, err := cmd.CombinedOutput(); err != nil {
- t.Fatalf("go run failed (%v): %s", err, b)
- }
-
- // Pick out the generated meta-data file.
- files, err := os.ReadDir(dstdir)
- if err != nil {
- t.Fatalf("reading %s: %v", dstdir, err)
- }
- for _, f := range files {
- if strings.HasPrefix(f.Name(), "covmeta") {
- return filepath.Join(dstdir, f.Name()), "hello.go:"
- }
- }
- t.Fatalf("could not locate generated meta-data file")
- return "", ""
-}
-
-func TestAuxMetaDataFiles(t *testing.T) {
- if !goexperiment.CoverageRedesign {
- return
- }
- if testing.CoverMode() == "" {
- return
- }
- testenv.MustHaveGoRun(t)
- tgcd := testGoCoverDir(t)
- t.Logf("testing.testGoCoverDir() returns %s mode=%s\n",
- tgcd, testing.CoverMode())
-
- td := t.TempDir()
-
- // Manufacture a new, separate meta-data file not related to this
- // test. Contents are not important, just so long as the
- // packages/paths are different.
- othermetadir := filepath.Join(td, "othermeta")
- if err := os.Mkdir(othermetadir, 0777); err != nil {
- t.Fatalf("mkdir failed: %v", err)
- }
- mfile, token := genAuxMeta(t, othermetadir)
-
- // Write a metafiles file.
- metafiles := filepath.Join(tgcd, coverage.MetaFilesFileName)
- mfc := coverage.MetaFileCollection{
- ImportPaths: []string{"command-line-arguments"},
- MetaFileFragments: []string{mfile},
- }
- jdata, err := json.Marshal(mfc)
- if err != nil {
- t.Fatalf("marshal MetaFileCollection: %v", err)
- }
- if err := os.WriteFile(metafiles, jdata, 0666); err != nil {
- t.Fatalf("write failed: %v", err)
- }
-
- // Kick off guts of test.
- var sb strings.Builder
- textfile := filepath.Join(td, "file2.txt")
- err = processCoverTestDirInternal(tgcd, textfile,
- testing.CoverMode(), "", &sb)
- if err != nil {
- t.Fatalf("bad: %v", err)
- }
- if err = os.Remove(metafiles); err != nil {
- t.Fatalf("removing metafiles file: %v", err)
- }
-
- // Look for the expected things in the coverage profile.
- contents, err := os.ReadFile(textfile)
- strc := string(contents)
- if err != nil {
- t.Fatalf("problems reading text file %s: %v", textfile, err)
- }
- if !strings.Contains(strc, token) {
- t.Logf("content: %s\n", string(contents))
- t.Fatalf("cov profile does not contain aux meta content %q", token)
- }
-}
diff --git a/src/runtime/covercounter.go b/src/runtime/covercounter.go
index 72842bdd94..6dbc882d16 100644
--- a/src/runtime/covercounter.go
+++ b/src/runtime/covercounter.go
@@ -9,8 +9,8 @@ import (
"unsafe"
)
-//go:linkname runtime_coverage_getCovCounterList runtime/coverage.getCovCounterList
-func runtime_coverage_getCovCounterList() []rtcov.CovCounterBlob {
+//go:linkname coverage_getCovCounterList internal/coverage/cfile.getCovCounterList
+func coverage_getCovCounterList() []rtcov.CovCounterBlob {
res := []rtcov.CovCounterBlob{}
u32sz := unsafe.Sizeof(uint32(0))
for datap := &firstmoduledata; datap != nil; datap = datap.next {
diff --git a/src/runtime/covermeta.go b/src/runtime/covermeta.go
index 54ef42ae1f..57a6b29e91 100644
--- a/src/runtime/covermeta.go
+++ b/src/runtime/covermeta.go
@@ -9,64 +9,12 @@ import (
"unsafe"
)
-// covMeta is the top-level container for bits of state related to
-// code coverage meta-data in the runtime.
-var covMeta struct {
- // metaList contains the list of currently registered meta-data
- // blobs for the running program.
- metaList []rtcov.CovMetaBlob
-
- // pkgMap records mappings from hard-coded package IDs to
- // slots in the covMetaList above.
- pkgMap map[int]int
-
- // Set to true if we discover a package mapping glitch.
- hardCodedListNeedsUpdating bool
-}
-
-// addCovMeta is invoked during package "init" functions by the
-// compiler when compiling for coverage instrumentation; here 'p' is a
-// meta-data blob of length 'dlen' for the package in question, 'hash'
-// is a compiler-computed md5.sum for the blob, 'pkpath' is the
-// package path, 'pkid' is the hard-coded ID that the compiler is
-// using for the package (or -1 if the compiler doesn't think a
-// hard-coded ID is needed), and 'cmode'/'cgran' are the coverage
-// counter mode and granularity requested by the user. Return value is
-// the ID for the package for use by the package code itself.
-func addCovMeta(p unsafe.Pointer, dlen uint32, hash [16]byte, pkpath string, pkid int, cmode uint8, cgran uint8) uint32 {
- slot := len(covMeta.metaList)
- covMeta.metaList = append(covMeta.metaList,
- rtcov.CovMetaBlob{
- P: (*byte)(p),
- Len: dlen,
- Hash: hash,
- PkgPath: pkpath,
- PkgID: pkid,
- CounterMode: cmode,
- CounterGranularity: cgran,
- })
- if pkid != -1 {
- if covMeta.pkgMap == nil {
- covMeta.pkgMap = make(map[int]int)
- }
- if _, ok := covMeta.pkgMap[pkid]; ok {
- throw("runtime.addCovMeta: coverage package map collision")
- }
- // Record the real slot (position on meta-list) for this
- // package; we'll use the map to fix things up later on.
- covMeta.pkgMap[pkid] = slot
+// The compiler emits calls to runtime.addCovMeta
+// but this code has moved to rtcov.AddMeta.
+func addCovMeta(p unsafe.Pointer, dlen uint32, hash [16]byte, pkgpath string, pkgid int, cmode uint8, cgran uint8) uint32 {
+ id := rtcov.AddMeta(p, dlen, hash, pkgpath, pkgid, cmode, cgran)
+ if id == 0 {
+ throw("runtime.addCovMeta: coverage package map collision")
}
-
- // ID zero is reserved as invalid.
- return uint32(slot + 1)
-}
-
-//go:linkname runtime_coverage_getCovMetaList runtime/coverage.getCovMetaList
-func runtime_coverage_getCovMetaList() []rtcov.CovMetaBlob {
- return covMeta.metaList
-}
-
-//go:linkname runtime_coverage_getCovPkgMap runtime/coverage.getCovPkgMap
-func runtime_coverage_getCovPkgMap() map[int]int {
- return covMeta.pkgMap
+ return id
}
diff --git a/src/runtime/cpuprof.go b/src/runtime/cpuprof.go
index b2898ba909..100a78258a 100644
--- a/src/runtime/cpuprof.go
+++ b/src/runtime/cpuprof.go
@@ -209,8 +209,17 @@ func CPUProfile() []byte {
panic("CPUProfile no longer available")
}
-//go:linkname runtime_pprof_runtime_cyclesPerSecond runtime/pprof.runtime_cyclesPerSecond
-func runtime_pprof_runtime_cyclesPerSecond() int64 {
+// runtime/pprof.runtime_cyclesPerSecond should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/grafana/pyroscope-go/godeltaprof
+// - github.com/pyroscope-io/godeltaprof
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname pprof_cyclesPerSecond runtime/pprof.runtime_cyclesPerSecond
+func pprof_cyclesPerSecond() int64 {
return ticksPerSecond()
}
@@ -222,6 +231,14 @@ func runtime_pprof_runtime_cyclesPerSecond() int64 {
// The returned data contains a whole number of records, and tags contains
// exactly one entry per record.
//
+// runtime_pprof_readProfile should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/pyroscope-io/pyroscope
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname runtime_pprof_readProfile runtime/pprof.readProfile
func runtime_pprof_readProfile() ([]uint64, []unsafe.Pointer, bool) {
lock(&cpuprof.lock)
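The "hall of shame" comments added throughout this change mark symbols that outside packages reach with pull-style //go:linkname, which is why their signatures must stay frozen. Roughly, such a consumer looks like the sketch below (the package name is hypothetical; runtime/pprof.runtime_cyclesPerSecond is used as the example because it appears in this hunk):

	package deltaprof // hypothetical third-party profiler package

	import _ "unsafe" // required for the go:linkname directive below

	// No body: the linker binds this declaration to the runtime/pprof symbol,
	// so any change to that function's signature silently breaks the consumer.
	//go:linkname runtime_cyclesPerSecond runtime/pprof.runtime_cyclesPerSecond
	func runtime_cyclesPerSecond() int64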
diff --git a/src/runtime/crash_test.go b/src/runtime/crash_test.go
index 19c9cddf36..69e1034ff8 100644
--- a/src/runtime/crash_test.go
+++ b/src/runtime/crash_test.go
@@ -11,7 +11,7 @@ import (
"flag"
"fmt"
"internal/testenv"
- tracev2 "internal/trace/v2"
+ traceparse "internal/trace"
"io"
"log"
"os"
@@ -168,7 +168,23 @@ func buildTestProg(t *testing.T, binary string, flags ...string) (string, error)
cmd := exec.Command(testenv.GoToolPath(t), append([]string{"build", "-o", exe}, flags...)...)
t.Logf("running %v", cmd)
cmd.Dir = "testdata/" + binary
- out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
+ cmd = testenv.CleanCmdEnv(cmd)
+
+ // Add the rangefunc GOEXPERIMENT unconditionally since some tests depend on it.
+ // TODO(61405): Remove this once it's enabled by default.
+ edited := false
+ for i := range cmd.Env {
+ e := cmd.Env[i]
+ if _, vars, ok := strings.Cut(e, "GOEXPERIMENT="); ok {
+ cmd.Env[i] = "GOEXPERIMENT=" + vars + ",rangefunc"
+ edited = true
+ }
+ }
+ if !edited {
+ cmd.Env = append(cmd.Env, "GOEXPERIMENT=rangefunc")
+ }
+
+ out, err := cmd.CombinedOutput()
if err != nil {
target.err = fmt.Errorf("building %s %v: %v\n%s", binary, flags, err, out)
} else {
@@ -900,7 +916,7 @@ func TestCrashWhileTracing(t *testing.T) {
if err := cmd.Start(); err != nil {
t.Fatalf("could not start subprocess: %v", err)
}
- r, err := tracev2.NewReader(stdOut)
+ r, err := traceparse.NewReader(stdOut)
if err != nil {
t.Fatalf("could not create trace.NewReader: %v", err)
}
@@ -918,9 +934,9 @@ loop:
break loop
}
switch ev.Kind() {
- case tracev2.EventSync:
+ case traceparse.EventSync:
seenSync = true
- case tracev2.EventLog:
+ case traceparse.EventLog:
v := ev.Log()
if v.Category == "xyzzy-cat" && v.Message == "xyzzy-msg" {
// Should we already stop reading here? More events may come, but
@@ -966,11 +982,11 @@ func TestPanicWhilePanicking(t *testing.T) {
Func string
}{
{
- "panic while printing panic value: important error message",
+ "panic while printing panic value: important multi-line\n\terror message",
"ErrorPanic",
},
{
- "panic while printing panic value: important stringer message",
+ "panic while printing panic value: important multi-line\n\tstringer message",
"StringerPanic",
},
{
@@ -986,7 +1002,7 @@ func TestPanicWhilePanicking(t *testing.T) {
"CircularPanic",
},
{
- "important string message",
+ "important multi-line\n\tstring message",
"StringPanic",
},
{
diff --git a/src/runtime/debug.go b/src/runtime/debug.go
index 184e4127c3..c477e2b9f6 100644
--- a/src/runtime/debug.go
+++ b/src/runtime/debug.go
@@ -124,3 +124,22 @@ func mayMoreStackMove() {
gp.stackguard0 = stackForceMove
}
}
+
+// debugPinnerKeepUnpin is used to make runtime.(*Pinner).Unpin reachable.
+var debugPinnerKeepUnpin bool = false
+
+// debugPinnerV1 returns a new Pinner that pins itself. This function can be
+// used by debuggers to easily obtain a Pinner that will not be garbage
+// collected (or moved in memory) even if no references to it exist in the
+// target program. This pinner in turn can be used to extend this property
+// to other objects, which debuggers can use to simplify the evaluation of
+// expressions involving multiple call injections.
+func debugPinnerV1() *Pinner {
+ p := new(Pinner)
+ p.Pin(unsafe.Pointer(p))
+ if debugPinnerKeepUnpin {
+ // Make Unpin reachable.
+ p.Unpin()
+ }
+ return p
+}
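A rough sketch of the property debugPinnerV1 relies on, written as ordinary Go rather than a debugger-injected call (not part of the change itself): a Pinner that pins itself will not be collected or moved, and further objects pinned through it get the same guarantee until Unpin is called.

	package main

	import (
		"runtime"
		"unsafe"
	)

	func main() {
		p := new(runtime.Pinner)
		p.Pin(unsafe.Pointer(p)) // p now keeps itself alive and immovable
		obj := new(int)
		p.Pin(obj) // obj is pinned (and reachable) through p as well
		// ... a debugger would evaluate expressions involving obj here ...
		p.Unpin() // releases p and everything pinned through it
	}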
diff --git a/src/runtime/debug/example_monitor_test.go b/src/runtime/debug/example_monitor_test.go
index 5a1f4e1417..b077e7adb3 100644
--- a/src/runtime/debug/example_monitor_test.go
+++ b/src/runtime/debug/example_monitor_test.go
@@ -91,7 +91,7 @@ func monitor() {
if err != nil {
log.Fatalf("StdinPipe: %v", err)
}
- debug.SetCrashOutput(pipe.(*os.File)) // (this conversion is safe)
+ debug.SetCrashOutput(pipe.(*os.File), debug.CrashOptions{}) // (this conversion is safe)
if err := cmd.Start(); err != nil {
log.Fatalf("can't start monitor: %v", err)
}
diff --git a/src/runtime/debug/garbage.go b/src/runtime/debug/garbage.go
index 4f11c58733..ec74ba0165 100644
--- a/src/runtime/debug/garbage.go
+++ b/src/runtime/debug/garbage.go
@@ -6,7 +6,7 @@ package debug
import (
"runtime"
- "sort"
+ "slices"
"time"
)
@@ -69,7 +69,7 @@ func ReadGCStats(stats *GCStats) {
// See the allocation at the top of the function.
sorted := stats.Pause[n : n+n]
copy(sorted, stats.Pause)
- sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
+ slices.Sort(sorted)
nq := len(stats.PauseQuantiles) - 1
for i := 0; i < nq; i++ {
stats.PauseQuantiles[i] = sorted[len(sorted)*i/nq]
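slices.Sort replaces the sort.Slice call here: for a slice of an ordered element type it produces the same ascending order without the comparison closure. A small standalone sketch (not part of the change itself):

	package main

	import (
		"fmt"
		"slices"
		"sort"
		"time"
	)

	func main() {
		a := []time.Duration{3 * time.Millisecond, 1 * time.Millisecond, 2 * time.Millisecond}
		b := slices.Clone(a)
		sort.Slice(a, func(i, j int) bool { return a[i] < a[j] }) // old form
		slices.Sort(b)                                            // new form, same result
		fmt.Println(a, b)
	}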
diff --git a/src/runtime/debug/stack.go b/src/runtime/debug/stack.go
index 8dfea52d34..d7a860b7dc 100644
--- a/src/runtime/debug/stack.go
+++ b/src/runtime/debug/stack.go
@@ -31,6 +31,12 @@ func Stack() []byte {
}
}
+// CrashOptions provides options that control the formatting of the
+// fatal crash message.
+type CrashOptions struct {
+ /* for future expansion */
+}
+
// SetCrashOutput configures a single additional file where unhandled
// panics and other fatal errors are printed, in addition to standard error.
// There is only one additional file: calling SetCrashOutput again overrides
@@ -40,7 +46,7 @@ func Stack() []byte {
// To disable this additional crash output, call SetCrashOutput(nil).
// If called concurrently with a crash, some in-progress output may be written
// to the old file even after an overriding SetCrashOutput returns.
-func SetCrashOutput(f *os.File) error {
+func SetCrashOutput(f *os.File, opts CrashOptions) error {
fd := ^uintptr(0)
if f != nil {
// The runtime will write to this file descriptor from
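With the new signature, existing callers simply pass an empty options struct. A minimal sketch of the two-argument call (not part of the change itself; the file name is arbitrary):

	package main

	import (
		"log"
		"os"
		"runtime/debug"
	)

	func main() {
		f, err := os.Create("crash.log") // arbitrary example path
		if err != nil {
			log.Fatal(err)
		}
		// CrashOptions is currently empty and reserved for future expansion.
		if err := debug.SetCrashOutput(f, debug.CrashOptions{}); err != nil {
			log.Fatal(err)
		}
	}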
diff --git a/src/runtime/debug/stack_test.go b/src/runtime/debug/stack_test.go
index 289749ccb4..e1559303f0 100644
--- a/src/runtime/debug/stack_test.go
+++ b/src/runtime/debug/stack_test.go
@@ -13,6 +13,7 @@ import (
"os/exec"
"path/filepath"
"runtime"
+ "runtime/debug"
. "runtime/debug"
"strings"
"testing"
@@ -29,7 +30,7 @@ func TestMain(m *testing.M) {
if err != nil {
log.Fatal(err)
}
- if err := SetCrashOutput(f); err != nil {
+ if err := SetCrashOutput(f, debug.CrashOptions{}); err != nil {
log.Fatal(err) // e.g. EMFILE
}
println("hello")
diff --git a/src/runtime/ehooks_test.go b/src/runtime/ehooks_test.go
index ee286ecb9a..4beb20b0be 100644
--- a/src/runtime/ehooks_test.go
+++ b/src/runtime/ehooks_test.go
@@ -28,32 +28,36 @@ func TestExitHooks(t *testing.T) {
scenarios := []struct {
mode string
expected string
- musthave string
+ musthave []string
}{
{
mode: "simple",
expected: "bar foo",
- musthave: "",
},
{
mode: "goodexit",
expected: "orange apple",
- musthave: "",
},
{
mode: "badexit",
expected: "blub blix",
- musthave: "",
},
{
- mode: "panics",
- expected: "",
- musthave: "fatal error: internal error: exit hook invoked panic",
+ mode: "panics",
+ musthave: []string{
+ "fatal error: exit hook invoked panic",
+ "main.testPanics",
+ },
+ },
+ {
+ mode: "callsexit",
+ musthave: []string{
+ "fatal error: exit hook invoked exit",
+ },
},
{
- mode: "callsexit",
+ mode: "exit2",
expected: "",
- musthave: "fatal error: internal error: exit hook invoked exit",
},
}
@@ -71,20 +75,18 @@ func TestExitHooks(t *testing.T) {
out, _ := cmd.CombinedOutput()
outs := strings.ReplaceAll(string(out), "\n", " ")
outs = strings.TrimSpace(outs)
- if s.expected != "" {
- if s.expected != outs {
- t.Logf("raw output: %q", outs)
- t.Errorf("failed%s mode %s: wanted %q got %q", bt,
- s.mode, s.expected, outs)
- }
- } else if s.musthave != "" {
- if !strings.Contains(outs, s.musthave) {
- t.Logf("raw output: %q", outs)
- t.Errorf("failed mode %s: output does not contain %q",
- s.mode, s.musthave)
+ if s.expected != "" && s.expected != outs {
+ t.Fatalf("failed%s mode %s: wanted %q\noutput:\n%s", bt,
+ s.mode, s.expected, outs)
+ }
+ for _, need := range s.musthave {
+ if !strings.Contains(outs, need) {
+ t.Fatalf("failed mode %s: output does not contain %q\noutput:\n%s",
+ s.mode, need, outs)
}
- } else {
- panic("badly written scenario")
+ }
+ if s.expected == "" && s.musthave == nil && outs != "" {
+ t.Errorf("failed mode %s: wanted no output\noutput:\n%s", s.mode, outs)
}
}
}
diff --git a/src/runtime/env_posix.go b/src/runtime/env_posix.go
index 0eb4f0d7a3..323ce7de9a 100644
--- a/src/runtime/env_posix.go
+++ b/src/runtime/env_posix.go
@@ -42,7 +42,26 @@ func lowerASCII(c byte) byte {
return c
}
-var _cgo_setenv unsafe.Pointer // pointer to C function
+// _cgo_setenv should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/ebitengine/purego
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname _cgo_setenv
+var _cgo_setenv unsafe.Pointer // pointer to C function
+
+// _cgo_unsetenv should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/ebitengine/purego
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname _cgo_unsetenv
var _cgo_unsetenv unsafe.Pointer // pointer to C function
// Update the C environment if cgo is loaded.
diff --git a/src/runtime/error.go b/src/runtime/error.go
index fe95f31005..406f36ca5f 100644
--- a/src/runtime/error.go
+++ b/src/runtime/error.go
@@ -211,11 +211,16 @@ type stringer interface {
String() string
}
-// printany prints an argument passed to panic.
+// printpanicval prints an argument passed to panic.
// If panic is called with a value that has a String or Error method,
// it has already been converted into a string by preprintpanics.
-func printany(i any) {
- switch v := i.(type) {
+//
+// To ensure that the traceback can be unambiguously parsed even when
+// the panic value contains "\ngoroutine" and other stack-like
+// strings, newlines in the string representation of v are replaced by
+// "\n\t".
+func printpanicval(v any) {
+ switch v := v.(type) {
case nil:
print("nil")
case bool:
@@ -251,19 +256,22 @@ func printany(i any) {
case complex128:
print(v)
case string:
- print(v)
+ printindented(v)
default:
- printanycustomtype(i)
+ printanycustomtype(v)
}
}
+// Invariant: each newline in the string representation is followed by a tab.
func printanycustomtype(i any) {
eface := efaceOf(&i)
typestring := toRType(eface._type).string()
switch eface._type.Kind_ {
case abi.String:
- print(typestring, `("`, *(*string)(eface.data), `")`)
+ print(typestring, `("`)
+ printindented(*(*string)(eface.data))
+ print(`")`)
case abi.Bool:
print(typestring, "(", *(*bool)(eface.data), ")")
case abi.Int:
@@ -301,6 +309,21 @@ func printanycustomtype(i any) {
}
}
+// printindented prints s, replacing "\n" with "\n\t".
+func printindented(s string) {
+ for {
+ i := bytealg.IndexByteString(s, '\n')
+ if i < 0 {
+ break
+ }
+ i += len("\n")
+ print(s[:i])
+ print("\t")
+ s = s[i:]
+ }
+ print(s)
+}
+
// panicwrap generates a panic for a call to a wrapped value method
// with a nil pointer receiver.
//
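Outside the runtime, the rewrite performed by printindented could be expressed with the strings package; the runtime version prints the pieces directly to avoid allocating. An illustrative sketch (not part of the change itself; assumes the strings import):

	// indentNewlines applies the same "\n" -> "\n\t" rewrite used for panic
	// values, which keeps a multi-line value from being mistaken for the start
	// of a new goroutine stack in the traceback.
	func indentNewlines(s string) string {
		return strings.ReplaceAll(s, "\n", "\n\t")
	}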
diff --git a/src/runtime/exithook.go b/src/runtime/exithook.go
deleted file mode 100644
index 65b426b383..0000000000
--- a/src/runtime/exithook.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-// addExitHook registers the specified function 'f' to be run at
-// program termination (e.g. when someone invokes os.Exit(), or when
-// main.main returns). Hooks are run in reverse order of registration:
-// first hook added is the last one run.
-//
-// CAREFUL: the expectation is that addExitHook should only be called
-// from a safe context (e.g. not an error/panic path or signal
-// handler, preemption enabled, allocation allowed, write barriers
-// allowed, etc), and that the exit function 'f' will be invoked under
-// similar circumstances. That is to say, we are expecting that 'f'
-// uses normal / high-level Go code as opposed to one of the more
-// restricted dialects used for the trickier parts of the runtime.
-func addExitHook(f func(), runOnNonZeroExit bool) {
- exitHooks.hooks = append(exitHooks.hooks, exitHook{f: f, runOnNonZeroExit: runOnNonZeroExit})
-}
-
-// exitHook stores a function to be run on program exit, registered
-// by the utility runtime.addExitHook.
-type exitHook struct {
- f func() // func to run
- runOnNonZeroExit bool // whether to run on non-zero exit code
-}
-
-// exitHooks stores state related to hook functions registered to
-// run when program execution terminates.
-var exitHooks struct {
- hooks []exitHook
- runningExitHooks bool
-}
-
-// runExitHooks runs any registered exit hook functions (funcs
-// previously registered using runtime.addExitHook). Here 'exitCode'
-// is the status code being passed to os.Exit, or zero if the program
-// is terminating normally without calling os.Exit.
-func runExitHooks(exitCode int) {
- if exitHooks.runningExitHooks {
- throw("internal error: exit hook invoked exit")
- }
- exitHooks.runningExitHooks = true
-
- runExitHook := func(f func()) (caughtPanic bool) {
- defer func() {
- if x := recover(); x != nil {
- caughtPanic = true
- }
- }()
- f()
- return
- }
-
- finishPageTrace()
- for i := range exitHooks.hooks {
- h := exitHooks.hooks[len(exitHooks.hooks)-i-1]
- if exitCode != 0 && !h.runOnNonZeroExit {
- continue
- }
- if caughtPanic := runExitHook(h.f); caughtPanic {
- throw("internal error: exit hook invoked panic")
- }
- }
- exitHooks.hooks = nil
- exitHooks.runningExitHooks = false
-}
diff --git a/src/runtime/export_debug_test.go b/src/runtime/export_debug_test.go
index 810a5a6435..4e0a4ef97e 100644
--- a/src/runtime/export_debug_test.go
+++ b/src/runtime/export_debug_test.go
@@ -8,6 +8,7 @@ package runtime
import (
"internal/abi"
+ "internal/stringslite"
"unsafe"
)
@@ -145,7 +146,7 @@ func (h *debugCallHandler) handle(info *siginfo, ctxt *sigctxt, gp2 *g) bool {
return false
}
f := findfunc(ctxt.sigpc())
- if !(hasPrefix(funcname(f), "runtime.debugCall") || hasPrefix(funcname(f), "debugCall")) {
+ if !(stringslite.HasPrefix(funcname(f), "runtime.debugCall") || stringslite.HasPrefix(funcname(f), "debugCall")) {
println("trap in unknown function", funcname(f))
return false
}
diff --git a/src/runtime/extern.go b/src/runtime/extern.go
index bb2f03b1ce..2019be4dde 100644
--- a/src/runtime/extern.go
+++ b/src/runtime/extern.go
@@ -35,9 +35,6 @@ time.
The GODEBUG variable controls debugging variables within the runtime.
It is a comma-separated list of name=val pairs setting these named variables:
- allocfreetrace: setting allocfreetrace=1 causes every allocation to be
- profiled and a stack trace printed on each object's allocation and free.
-
clobberfree: setting clobberfree=1 causes the garbage collector to
clobber the memory content of an object with bad content when it frees
the object.
@@ -145,6 +142,13 @@ It is a comma-separated list of name=val pairs setting these named variables:
When set to 0 memory profiling is disabled. Refer to the description of
MemProfileRate for the default value.
+ profstackdepth: profstackdepth=128 (the default) will set the maximum stack
+ depth used by all pprof profilers except for the CPU profiler to 128 frames.
+ Stack traces that exceed this limit will be truncated to the limit starting
+ from the leaf frame. Setting profstackdepth to any value above 1024 will
+ silently default to 1024. Future versions of Go may remove this limitation
+ and extend profstackdepth to apply to the CPU profiler and execution tracer.
+
pagetrace: setting pagetrace=/path/to/file will write out a trace of page events
that can be viewed, analyzed, and visualized using the x/debug/cmd/pagetrace tool.
Build your program with GOEXPERIMENT=pagetrace to enable this functionality. Do not
@@ -198,9 +202,8 @@ It is a comma-separated list of name=val pairs setting these named variables:
tracebackancestors: setting tracebackancestors=N extends tracebacks with the stacks at
which goroutines were created, where N limits the number of ancestor goroutines to
- report. This also extends the information returned by runtime.Stack. Ancestor's goroutine
- IDs will refer to the ID of the goroutine at the time of creation; it's possible for this
- ID to be reused for another goroutine. Setting N to 0 will report no ancestry information.
+ report. This also extends the information returned by runtime.Stack.
+ Setting N to 0 will report no ancestry information.
tracefpunwindoff: setting tracefpunwindoff=1 forces the execution tracer to
use the runtime's default stack unwinder instead of frame pointer unwinding.
diff --git a/src/runtime/gc_test.go b/src/runtime/gc_test.go
index 9451a1b608..908f632246 100644
--- a/src/runtime/gc_test.go
+++ b/src/runtime/gc_test.go
@@ -6,12 +6,13 @@ package runtime_test
import (
"fmt"
+ "math/bits"
"math/rand"
"os"
"reflect"
"runtime"
"runtime/debug"
- "sort"
+ "slices"
"strings"
"sync"
"sync/atomic"
@@ -278,8 +279,17 @@ func TestGCTestIsReachable(t *testing.T) {
}
got := runtime.GCTestIsReachable(all...)
- if want != got {
- t.Fatalf("did not get expected reachable set; want %b, got %b", want, got)
+ if got&want != want {
+ // This is a serious bug - an object is live (due to the KeepAlive
+ // call below), but isn't reported as such.
+ t.Fatalf("live object not in reachable set; want %b, got %b", want, got)
+ }
+ if bits.OnesCount64(got&^want) > 1 {
+ // Note: we can occasionally have a value that is retained even though
+ // it isn't live, due to conservative scanning of stack frames.
+ // See issue 67204. For now, we allow a "slop" of 1 unintentionally
+ // retained object.
+ t.Fatalf("dead object in reachable set; want %b, got %b", want, got)
}
runtime.KeepAlive(half)
}
@@ -548,9 +558,7 @@ func BenchmarkReadMemStatsLatency(b *testing.B) {
b.ReportMetric(0, "allocs/op")
// Sort latencies then report percentiles.
- sort.Slice(latencies, func(i, j int) bool {
- return latencies[i] < latencies[j]
- })
+ slices.Sort(latencies)
b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
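The reworked reachability assertion above treats want and got as bitmasks with one bit per test object: every want bit must appear in got, and at most one extra got bit is tolerated because conservative stack scanning can keep a dead object alive. A small standalone sketch of those two conditions (not part of the change itself):

	package main

	import (
		"fmt"
		"math/bits"
	)

	func main() {
		want := uint64(0b0101) // objects 0 and 2 are kept alive
		got := uint64(0b0111)  // the GC also reported object 1 as reachable
		fmt.Println(got&want == want)            // true: no live object was missed
		fmt.Println(bits.OnesCount64(got&^want)) // 1: one retained dead object, tolerated
	}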
diff --git a/src/runtime/hash_test.go b/src/runtime/hash_test.go
index 36207e7ed0..24c04b260e 100644
--- a/src/runtime/hash_test.go
+++ b/src/runtime/hash_test.go
@@ -143,6 +143,7 @@ func TestSmhasherSmallKeys(t *testing.T) {
if race.Enabled {
t.Skip("Too long for race mode")
}
+ testenv.ParallelOn64Bit(t)
h := newHashSet()
var b [3]byte
for i := 0; i < 256; i++ {
@@ -164,6 +165,7 @@ func TestSmhasherSmallKeys(t *testing.T) {
// Different length strings of all zeros have distinct hashes.
func TestSmhasherZeros(t *testing.T) {
+ t.Parallel()
N := 256 * 1024
if testing.Short() {
N = 1024
@@ -187,6 +189,7 @@ func TestSmhasherTwoNonzero(t *testing.T) {
if race.Enabled {
t.Skip("Too long for race mode")
}
+ testenv.ParallelOn64Bit(t)
h := newHashSet()
for n := 2; n <= 16; n++ {
twoNonZero(h, n)
@@ -232,6 +235,7 @@ func TestSmhasherCyclic(t *testing.T) {
if race.Enabled {
t.Skip("Too long for race mode")
}
+ t.Parallel()
r := rand.New(rand.NewSource(1234))
const REPEAT = 8
const N = 1000000
@@ -261,6 +265,7 @@ func TestSmhasherSparse(t *testing.T) {
if testing.Short() {
t.Skip("Skipping in short mode")
}
+ t.Parallel()
h := newHashSet()
sparse(t, h, 32, 6)
sparse(t, h, 40, 6)
@@ -302,6 +307,7 @@ func TestSmhasherPermutation(t *testing.T) {
if race.Enabled {
t.Skip("Too long for race mode")
}
+ testenv.ParallelOn64Bit(t)
h := newHashSet()
permutation(t, h, []uint32{0, 1, 2, 3, 4, 5, 6, 7}, 8)
permutation(t, h, []uint32{0, 1 << 29, 2 << 29, 3 << 29, 4 << 29, 5 << 29, 6 << 29, 7 << 29}, 8)
@@ -475,6 +481,7 @@ func TestSmhasherAvalanche(t *testing.T) {
if race.Enabled {
t.Skip("Too long for race mode")
}
+ t.Parallel()
avalancheTest1(t, &BytesKey{make([]byte, 2)})
avalancheTest1(t, &BytesKey{make([]byte, 4)})
avalancheTest1(t, &BytesKey{make([]byte, 8)})
@@ -545,6 +552,7 @@ func TestSmhasherWindowed(t *testing.T) {
if race.Enabled {
t.Skip("Too long for race mode")
}
+ t.Parallel()
h := newHashSet()
t.Logf("32 bit keys")
windowed(t, h, &Int32Key{})
@@ -588,6 +596,7 @@ func TestSmhasherText(t *testing.T) {
if testing.Short() {
t.Skip("Skipping in short mode")
}
+ t.Parallel()
h := newHashSet()
text(t, h, "Foo", "Bar")
text(t, h, "FooBar", "")
@@ -798,6 +807,7 @@ func TestCollisions(t *testing.T) {
if testing.Short() {
t.Skip("Skipping in short mode")
}
+ t.Parallel()
for i := 0; i < 16; i++ {
for j := 0; j < 16; j++ {
if j == i {
diff --git a/src/runtime/iface.go b/src/runtime/iface.go
index e280180665..41a10ae012 100644
--- a/src/runtime/iface.go
+++ b/src/runtime/iface.go
@@ -32,6 +32,15 @@ func itabHashFunc(inter *interfacetype, typ *_type) uintptr {
return uintptr(inter.Type.Hash ^ typ.Hash)
}
+// getitab should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname getitab
func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
if len(inter.Methods) == 0 {
throw("internal error - misuse of itab")
@@ -379,6 +388,15 @@ func convT32(val uint32) (x unsafe.Pointer) {
return
}
+// convT64 should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname convT64
func convT64(val uint64) (x unsafe.Pointer) {
if val < uint64(len(staticuint64s)) {
x = unsafe.Pointer(&staticuint64s[val])
@@ -389,6 +407,15 @@ func convT64(val uint64) (x unsafe.Pointer) {
return
}
+// convTstring should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname convTstring
func convTstring(val string) (x unsafe.Pointer) {
if val == "" {
x = unsafe.Pointer(&zeroVal[0])
@@ -399,6 +426,15 @@ func convTstring(val string) (x unsafe.Pointer) {
return
}
+// convTslice should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname convTslice
func convTslice(val []byte) (x unsafe.Pointer) {
// Note: this must work for any element type, not just byte.
if (*slice)(unsafe.Pointer(&val)).array == nil {
@@ -624,6 +660,15 @@ func buildInterfaceSwitchCache(oldC *abi.InterfaceSwitchCache, typ *_type, case_
// causes a cache lookup to fail immediately.)
var emptyInterfaceSwitchCache = abi.InterfaceSwitchCache{Mask: 0}
+// reflect_ifaceE2I is for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gitee.com/quant1x/gox
+// - github.com/modern-go/reflect2
+// - github.com/v2pro/plz
+//
+// Do not remove or change the type signature.
+//
//go:linkname reflect_ifaceE2I reflect.ifaceE2I
func reflect_ifaceE2I(inter *interfacetype, e eface, dst *iface) {
*dst = iface{assertE2I(inter, e._type), e.data}
diff --git a/src/runtime/linkname.go b/src/runtime/linkname.go
new file mode 100644
index 0000000000..dd7f674251
--- /dev/null
+++ b/src/runtime/linkname.go
@@ -0,0 +1,34 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import _ "unsafe"
+
+// used in internal/godebug and syscall
+//go:linkname write
+
+// used by cgo
+//go:linkname _cgo_panic_internal
+//go:linkname cgoAlwaysFalse
+//go:linkname cgoUse
+//go:linkname cgoCheckPointer
+//go:linkname cgoCheckResult
+//go:linkname cgoNoCallback
+//go:linkname gobytes
+//go:linkname gostringn
+
+// used in plugin
+//go:linkname doInit
+
+// used in math/bits
+//go:linkname overflowError
+//go:linkname divideError
+
+// used in tests
+//go:linkname extraMInUse
+//go:linkname blockevent
+//go:linkname haveHighResSleep
+//go:linkname blockUntilEmptyFinalizerQueue
+//go:linkname lockedOSThread
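The one-argument //go:linkname form used in this new file does not rename anything; it marks each runtime symbol as an allowed linkname target so that the pull-style references listed in the comments keep linking under the stricter checks. For instance, the overflowError and divideError entries pair up with pulls along these lines on the math/bits side (an illustrative sketch, not the verbatim standard-library file):

	package bits

	import _ "unsafe" // required for go:linkname

	//go:linkname overflowError runtime.overflowError
	var overflowError error

	//go:linkname divideError runtime.divideError
	var divideError error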
diff --git a/src/runtime/linkname_unix.go b/src/runtime/linkname_unix.go
new file mode 100644
index 0000000000..65f876fa4b
--- /dev/null
+++ b/src/runtime/linkname_unix.go
@@ -0,0 +1,12 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package runtime
+
+import _ "unsafe"
+
+// used in internal/syscall/unix
+//go:linkname fcntl
diff --git a/src/runtime/lockrank.go b/src/runtime/lockrank.go
index 33b0387686..432ace728b 100644
--- a/src/runtime/lockrank.go
+++ b/src/runtime/lockrank.go
@@ -50,6 +50,7 @@ const (
lockRankFin
lockRankSpanSetSpine
lockRankMspanSpecial
+ lockRankTraceTypeTab
// MPROF
lockRankGcBitsArenas
lockRankProfInsert
@@ -119,6 +120,7 @@ var lockNames = []string{
lockRankFin: "fin",
lockRankSpanSetSpine: "spanSetSpine",
lockRankMspanSpecial: "mspanSpecial",
+ lockRankTraceTypeTab: "traceTypeTab",
lockRankGcBitsArenas: "gcBitsArenas",
lockRankProfInsert: "profInsert",
lockRankProfBlock: "profBlock",
@@ -197,6 +199,7 @@ var lockPartialOrder [][]lockRank = [][]lockRank{
lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
+ lockRankTraceTypeTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial},
lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 48cace9171..b92a213245 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -965,6 +965,21 @@ func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bo
// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
+//
+// mallocgc should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/gopkg
+// - github.com/bytedance/sonic
+// - github.com/cloudwego/frugal
+// - github.com/cockroachdb/cockroach
+// - github.com/cockroachdb/pebble
+// - github.com/ugorji/go/codec
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname mallocgc
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
@@ -1261,14 +1276,18 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
}
if debug.malloc {
- if debug.allocfreetrace != 0 {
- tracealloc(x, size, typ)
- }
-
if inittrace.active && inittrace.id == getg().goid {
// Init functions are executed sequentially in a single goroutine.
inittrace.bytes += uint64(fullSize)
}
+
+ if traceAllocFreeEnabled() {
+ trace := traceAcquire()
+ if trace.ok() {
+ trace.HeapObjectAlloc(uintptr(x), typ)
+ traceRelease(trace)
+ }
+ }
}
if assistG != nil {
@@ -1367,6 +1386,17 @@ func newobject(typ *_type) unsafe.Pointer {
return mallocgc(typ.Size_, typ, true)
}
+// reflect_unsafe_New is meant for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gitee.com/quant1x/gox
+// - github.com/goccy/json
+// - github.com/modern-go/reflect2
+// - github.com/v2pro/plz
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
return mallocgc(typ.Size_, typ, true)
@@ -1378,6 +1408,18 @@ func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
}
// newarray allocates an array of n elements of type typ.
+//
+// newarray should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/RomiChan/protobuf
+// - github.com/segmentio/encoding
+// - github.com/ugorji/go/codec
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname newarray
func newarray(typ *_type, n int) unsafe.Pointer {
if n == 1 {
return mallocgc(typ.Size_, typ, true)
@@ -1389,6 +1431,20 @@ func newarray(typ *_type, n int) unsafe.Pointer {
return mallocgc(mem, typ, true)
}
+// reflect_unsafe_NewArray is meant for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gitee.com/quant1x/gox
+// - github.com/bytedance/sonic
+// - github.com/goccy/json
+// - github.com/modern-go/reflect2
+// - github.com/segmentio/encoding
+// - github.com/segmentio/kafka-go
+// - github.com/v2pro/plz
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
return newarray(typ, n)
@@ -1400,7 +1456,7 @@ func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
throw("profilealloc called without a P or outside bootstrapping")
}
c.nextSample = nextSample()
- mProf_Malloc(x, size)
+ mProf_Malloc(mp, x, size)
}
// nextSample returns the next sampling point for heap profiling. The goal is
diff --git a/src/runtime/map.go b/src/runtime/map.go
index d97e209deb..112084f5a7 100644
--- a/src/runtime/map.go
+++ b/src/runtime/map.go
@@ -283,6 +283,16 @@ func makemap64(t *maptype, hint int64, h *hmap) *hmap {
// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most bucketCnt
// at compile time and the map needs to be allocated on the heap.
+//
+// makemap_small should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname makemap_small
func makemap_small() *hmap {
h := new(hmap)
h.hash0 = uint32(rand())
@@ -294,6 +304,17 @@ func makemap_small() *hmap {
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, bucket pointed to can be used as the first bucket.
+//
+// makemap should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/cloudwego/frugal
+// - github.com/ugorji/go/codec
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname makemap
func makemap(t *maptype, hint int, h *hmap) *hmap {
mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
if overflow || mem > maxAlloc {
@@ -446,6 +467,15 @@ bucketloop:
return unsafe.Pointer(&zeroVal[0])
}
+// mapaccess2 should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/ugorji/go/codec
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname mapaccess2
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc()
@@ -568,6 +598,20 @@ func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Point
}
// Like mapaccess, but allocates a slot for the key if it is not present in the map.
+//
+// mapassign should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+// - github.com/cloudwego/frugal
+// - github.com/RomiChan/protobuf
+// - github.com/segmentio/encoding
+// - github.com/ugorji/go/codec
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname mapassign
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if h == nil {
panic(plainError("assignment to entry in nil map"))
@@ -685,6 +729,15 @@ done:
return elem
}
+// mapdelete should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/ugorji/go/codec
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname mapdelete
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if raceenabled && h != nil {
callerpc := getcallerpc()
@@ -805,6 +858,22 @@ search:
// The hiter struct pointed to by 'it' is allocated on the stack
// by the compiler's order pass or on the heap by reflect_mapiterinit.
// Both need to have zeroed hiter since the struct contains pointers.
+//
+// mapiterinit should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+// - github.com/cloudwego/frugal
+// - github.com/goccy/go-json
+// - github.com/RomiChan/protobuf
+// - github.com/segmentio/encoding
+// - github.com/ugorji/go/codec
+// - github.com/wI2L/jettison
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname mapiterinit
func mapiterinit(t *maptype, h *hmap, it *hiter) {
if raceenabled && h != nil {
callerpc := getcallerpc()
@@ -851,6 +920,20 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
mapiternext(it)
}
+// mapiternext should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+// - github.com/cloudwego/frugal
+// - github.com/RomiChan/protobuf
+// - github.com/segmentio/encoding
+// - github.com/ugorji/go/codec
+// - gonum.org/v1/gonum
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname mapiternext
func mapiternext(it *hiter) {
h := it.h
if raceenabled {
@@ -978,6 +1061,17 @@ next:
}
// mapclear deletes all keys from a map.
+// It is called by the compiler.
+//
+// mapclear should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/cloudwego/frugal
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname mapclear
func mapclear(t *maptype, h *hmap) {
if raceenabled && h != nil {
callerpc := getcallerpc()
@@ -1293,6 +1387,19 @@ func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
// Reflect stubs. Called from ../reflect/asm_*.s
+// reflect_makemap is for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gitee.com/quant1x/gox
+// - github.com/modern-go/reflect2
+// - github.com/goccy/go-json
+// - github.com/RomiChan/protobuf
+// - github.com/segmentio/encoding
+// - github.com/v2pro/plz
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype, cap int) *hmap {
// Check invariants and reflects math.
@@ -1332,6 +1439,16 @@ func reflect_makemap(t *maptype, cap int) *hmap {
return makemap(t, cap, nil)
}
+// reflect_mapaccess is for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gitee.com/quant1x/gox
+// - github.com/modern-go/reflect2
+// - github.com/v2pro/plz
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
elem, ok := mapaccess2(t, h, key)
@@ -1352,6 +1469,14 @@ func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
return elem
}
+// reflect_mapassign is for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gitee.com/quant1x/gox
+// - github.com/v2pro/plz
+//
+// Do not remove or change the type signature.
+//
//go:linkname reflect_mapassign reflect.mapassign0
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
p := mapassign(t, h, key)
@@ -1374,26 +1499,76 @@ func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
mapdelete_faststr(t, h, key)
}
+// reflect_mapiterinit is for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/modern-go/reflect2
+// - gitee.com/quant1x/gox
+// - github.com/v2pro/plz
+// - github.com/wI2L/jettison
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
mapiterinit(t, h, it)
}
+// reflect_mapiternext is for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gitee.com/quant1x/gox
+// - github.com/modern-go/reflect2
+// - github.com/goccy/go-json
+// - github.com/v2pro/plz
+// - github.com/wI2L/jettison
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *hiter) {
mapiternext(it)
}
+// reflect_mapiterkey is for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/goccy/go-json
+// - gonum.org/v1/gonum
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
return it.key
}
+// reflect_mapiterelem is for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/goccy/go-json
+// - gonum.org/v1/gonum
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname reflect_mapiterelem reflect.mapiterelem
func reflect_mapiterelem(it *hiter) unsafe.Pointer {
return it.elem
}
+// reflect_maplen is for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/goccy/go-json
+// - github.com/wI2L/jettison
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname reflect_maplen reflect.maplen
func reflect_maplen(h *hmap) int {
if h == nil {
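The recurring "hall of shame" comments above all describe the same mechanism: a third-party package declares a body-less function and binds it to the unexported runtime symbol with a //go:linkname directive. The sketch below is a hypothetical illustration of that pattern, not code from any of the listed packages; the package name, the unsafe.Pointer signatures, and the assumption that the first two words of hiter hold the current key and elem are illustrative simplifications.

// fastmap is a hypothetical package showing the go:linkname pattern the
// comments above warn about. It needs an empty .s file in the package so the
// compiler accepts the body-less declarations below.
package fastmap

import "unsafe"

//go:linkname mapiterinit runtime.mapiterinit
func mapiterinit(t, h, it unsafe.Pointer)

//go:linkname mapiternext runtime.mapiternext
func mapiternext(it unsafe.Pointer)

// iterate walks a map given its type descriptor, header, and a zeroed buffer
// laid out like runtime.hiter (zeroing is required, per the comment on
// mapiterinit above). The field-offset assumptions here are exactly the kind
// of coupling go.dev/issue/67401 tries to contain.
func iterate(t, h, it unsafe.Pointer, visit func(key, elem unsafe.Pointer)) {
	mapiterinit(t, h, it)
	for {
		key := *(*unsafe.Pointer)(it) // hiter.key assumed to be the first word
		if key == nil {
			return // iteration finished
		}
		elem := *(*unsafe.Pointer)(unsafe.Add(it, unsafe.Sizeof(uintptr(0)))) // hiter.elem assumed second
		visit(key, elem)
		mapiternext(it)
	}
}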
diff --git a/src/runtime/map_fast32.go b/src/runtime/map_fast32.go
index 7e52240e77..0eb8562f51 100644
--- a/src/runtime/map_fast32.go
+++ b/src/runtime/map_fast32.go
@@ -50,6 +50,15 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
return unsafe.Pointer(&zeroVal[0])
}
+// mapaccess2_fast32 should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/ugorji/go/codec
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname mapaccess2_fast32
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc()
@@ -90,6 +99,17 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
return unsafe.Pointer(&zeroVal[0]), false
}
+// mapassign_fast32 should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+// - github.com/cloudwego/frugal
+// - github.com/ugorji/go/codec
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname mapassign_fast32
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
if h == nil {
panic(plainError("assignment to entry in nil map"))
@@ -180,6 +200,15 @@ done:
return elem
}
+// mapassign_fast32ptr should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/ugorji/go/codec
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname mapassign_fast32ptr
func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if h == nil {
panic(plainError("assignment to entry in nil map"))
diff --git a/src/runtime/map_fast64.go b/src/runtime/map_fast64.go
index 2c365183cb..aca60eb2a8 100644
--- a/src/runtime/map_fast64.go
+++ b/src/runtime/map_fast64.go
@@ -50,6 +50,15 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
return unsafe.Pointer(&zeroVal[0])
}
+// mapaccess2_fast64 should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/ugorji/go/codec
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname mapaccess2_fast64
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc()
@@ -90,6 +99,17 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
return unsafe.Pointer(&zeroVal[0]), false
}
+// mapassign_fast64 should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+// - github.com/cloudwego/frugal
+// - github.com/ugorji/go/codec
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname mapassign_fast64
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
if h == nil {
panic(plainError("assignment to entry in nil map"))
@@ -180,6 +200,17 @@ done:
return elem
}
+// mapassign_fast64ptr should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+// - github.com/cloudwego/frugal
+// - github.com/ugorji/go/codec
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname mapassign_fast64ptr
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if h == nil {
panic(plainError("assignment to entry in nil map"))
diff --git a/src/runtime/map_faststr.go b/src/runtime/map_faststr.go
index d989190f71..5461a9f81e 100644
--- a/src/runtime/map_faststr.go
+++ b/src/runtime/map_faststr.go
@@ -105,6 +105,15 @@ dohash:
return unsafe.Pointer(&zeroVal[0])
}
+// mapaccess2_faststr should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/ugorji/go/codec
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname mapaccess2_faststr
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc()
@@ -200,6 +209,17 @@ dohash:
return unsafe.Pointer(&zeroVal[0]), false
}
+// mapassign_faststr should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+// - github.com/cloudwego/frugal
+// - github.com/ugorji/go/codec
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname mapassign_faststr
func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
if h == nil {
panic(plainError("assignment to entry in nil map"))
diff --git a/src/runtime/map_test.go b/src/runtime/map_test.go
index c29fb933ee..13624e0938 100644
--- a/src/runtime/map_test.go
+++ b/src/runtime/map_test.go
@@ -13,7 +13,7 @@ import (
"os"
"reflect"
"runtime"
- "sort"
+ "slices"
"strconv"
"strings"
"sync"
@@ -388,8 +388,8 @@ func TestBigItems(t *testing.T) {
values[i] = v[37]
i++
}
- sort.Strings(keys[:])
- sort.Strings(values[:])
+ slices.Sort(keys[:])
+ slices.Sort(values[:])
for i := 0; i < 100; i++ {
if keys[i] != fmt.Sprintf("string%02d", i) {
t.Errorf("#%d: missing key: %v", i, keys[i])
diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go
index dc6922da54..7dc8a1a5e5 100644
--- a/src/runtime/mbarrier.go
+++ b/src/runtime/mbarrier.go
@@ -148,6 +148,16 @@ import (
// TODO: Perfect for go:nosplitrec since we can't have a safe point
// anywhere in the bulk barrier or memmove.
//
+// typedmemmove should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/RomiChan/protobuf
+// - github.com/segmentio/encoding
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname typedmemmove
//go:nosplit
func typedmemmove(typ *abi.Type, dst, src unsafe.Pointer) {
if dst == src {
@@ -199,6 +209,18 @@ func wbMove(typ *_type, dst, src unsafe.Pointer) {
bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes, typ)
}
+// reflect_typedmemmove is meant for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gitee.com/quant1x/gox
+// - github.com/goccy/go-json
+// - github.com/modern-go/reflect2
+// - github.com/ugorji/go/codec
+// - github.com/v2pro/plz
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname reflect_typedmemmove reflect.typedmemmove
func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
if raceenabled {
@@ -248,6 +270,15 @@ func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *ab
}
}
+// typedslicecopy should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/segmentio/encoding
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname typedslicecopy
//go:nosplit
func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe.Pointer, srcLen int) int {
n := dstLen
@@ -303,6 +334,18 @@ func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe
return n
}
+// reflect_typedslicecopy is meant for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gitee.com/quant1x/gox
+// - github.com/modern-go/reflect2
+// - github.com/RomiChan/protobuf
+// - github.com/segmentio/encoding
+// - github.com/v2pro/plz
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname reflect_typedslicecopy reflect.typedslicecopy
func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
if !elemType.Pointers() {
@@ -332,6 +375,14 @@ func typedmemclr(typ *_type, ptr unsafe.Pointer) {
memclrNoHeapPointers(ptr, typ.Size_)
}
+// reflect_typedmemclr is meant for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/ugorji/go/codec
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname reflect_typedmemclr reflect.typedmemclr
func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) {
typedmemclr(typ, ptr)
@@ -365,6 +416,15 @@ func reflect_typedarrayclear(typ *_type, ptr unsafe.Pointer, len int) {
// pointers, usually by checking typ.PtrBytes. However, ptr
// does not have to point to the start of the allocation.
//
+// memclrHasPointers should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname memclrHasPointers
//go:nosplit
func memclrHasPointers(ptr unsafe.Pointer, n uintptr) {
// Pass nil for the type since we don't have one here anyway.
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index e7a712377b..689fac103c 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -1261,6 +1261,15 @@ func badPointer(s *mspan, p, refBase, refOff uintptr) {
// It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
// Since p is a uintptr, it would not be adjusted if the stack were to move.
//
+// findObject should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname findObject
//go:nosplit
func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
s = spanOf(p)
diff --git a/src/runtime/memclr_loong64.s b/src/runtime/memclr_loong64.s
index 313e4d4f33..1d45e82d49 100644
--- a/src/runtime/memclr_loong64.s
+++ b/src/runtime/memclr_loong64.s
@@ -7,10 +7,6 @@
// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
TEXT runtime·memclrNoHeapPointers<ABIInternal>(SB),NOSPLIT,$0-16
-#ifndef GOEXPERIMENT_regabiargs
- MOVV ptr+0(FP), R4
- MOVV n+8(FP), R5
-#endif
ADDV R4, R5, R6
// if less than 8 bytes, do one byte at a time
diff --git a/src/runtime/memmove_loong64.s b/src/runtime/memmove_loong64.s
index 5b7aeba698..a94cf999bc 100644
--- a/src/runtime/memmove_loong64.s
+++ b/src/runtime/memmove_loong64.s
@@ -8,11 +8,6 @@
// func memmove(to, from unsafe.Pointer, n uintptr)
TEXT runtime·memmove<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-24
-#ifndef GOEXPERIMENT_regabiargs
- MOVV to+0(FP), R4
- MOVV from+8(FP), R5
- MOVV n+16(FP), R6
-#endif
BNE R6, check
RET
diff --git a/src/runtime/metrics/doc.go b/src/runtime/metrics/doc.go
index fbbeb1a475..c89e176986 100644
--- a/src/runtime/metrics/doc.go
+++ b/src/runtime/metrics/doc.go
@@ -302,6 +302,10 @@ Below is the full list of supported metrics, ordered lexicographically.
The number of non-default behaviors executed by the crypto/tls
package due to a non-default GODEBUG=tls10server=... setting.
+ /godebug/non-default-behavior/tls3des:events
+ The number of non-default behaviors executed by the crypto/tls
+ package due to a non-default GODEBUG=tls3des=... setting.
+
/godebug/non-default-behavior/tlsmaxrsasize:events
The number of non-default behaviors executed by the crypto/tls
package due to a non-default GODEBUG=tlsmaxrsasize=... setting.
@@ -322,6 +326,21 @@ Below is the full list of supported metrics, ordered lexicographically.
The number of non-default behaviors executed by the os package
due to a non-default GODEBUG=winsymlink=... setting.
+ /godebug/non-default-behavior/x509keypairleaf:events
+ The number of non-default behaviors executed by the crypto/tls
+ package due to a non-default GODEBUG=x509keypairleaf=...
+ setting.
+
+ /godebug/non-default-behavior/x509negativeserial:events
+ The number of non-default behaviors executed by the crypto/x509
+ package due to a non-default GODEBUG=x509negativeserial=...
+ setting.
+
+ /godebug/non-default-behavior/x509seriallength:events
+ The number of non-default behaviors executed by the crypto/x509
+ package due to a non-default GODEBUG=x509seriallength=...
+ setting.
+
/godebug/non-default-behavior/x509sha1:events
The number of non-default behaviors executed by the crypto/x509
package due to a non-default GODEBUG=x509sha1=... setting.
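The GODEBUG counters documented above are ordinary uint64 metrics and can be read with the public runtime/metrics API. A minimal sketch, using one of the newly documented names (everything else in the snippet is illustrative):

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	// Sample one of the counters added to the list above. Names that the
	// running toolchain does not support come back with Kind() == KindBad
	// rather than causing an error.
	samples := []metrics.Sample{
		{Name: "/godebug/non-default-behavior/x509negativeserial:events"},
	}
	metrics.Read(samples)
	if samples[0].Value.Kind() == metrics.KindUint64 {
		fmt.Println("non-default x509negativeserial behaviors:", samples[0].Value.Uint64())
	}
}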
diff --git a/src/runtime/metrics_test.go b/src/runtime/metrics_test.go
index 859bc712f1..ebbf0e4fd0 100644
--- a/src/runtime/metrics_test.go
+++ b/src/runtime/metrics_test.go
@@ -7,6 +7,7 @@ package runtime_test
import (
"bytes"
"fmt"
+ "internal/abi"
"internal/goexperiment"
"internal/profile"
"internal/testenv"
@@ -1047,6 +1048,13 @@ func TestRuntimeLockMetricsAndProfile(t *testing.T) {
if metricGrowth == 0 && strictTiming {
// If the critical section is very short, systems with low timer
// resolution may be unable to measure it via nanotime.
+ //
+ // This is sampled at 1 per gTrackingPeriod, but the explicit
+ // runtime.mutex tests create 200 contention events. Observing
+ // zero of those has a probability of (7/8)^200 = 2.5e-12 which
+ // is acceptably low (though the calculation has a tenuous
+ // dependency on cheaprandn being a good-enough source of
+ // entropy).
t.Errorf("no increase in /sync/mutex/wait/total:seconds metric")
}
// This comparison is possible because the time measurements in support of
@@ -1112,7 +1120,7 @@ func TestRuntimeLockMetricsAndProfile(t *testing.T) {
name := t.Name()
t.Run("runtime.lock", func(t *testing.T) {
- mus := make([]runtime.Mutex, 100)
+ mus := make([]runtime.Mutex, 200)
var needContention atomic.Int64
delay := 100 * time.Microsecond // large relative to system noise, for comparison between clocks
delayMicros := delay.Microseconds()
@@ -1167,13 +1175,19 @@ func TestRuntimeLockMetricsAndProfile(t *testing.T) {
needContention.Store(int64(len(mus) - 1))
metricGrowth, profileGrowth, n, _ := testcase(true, stks, workers, fn)(t)
- if have, want := metricGrowth, delay.Seconds()*float64(len(mus)); have < want {
- // The test imposes a delay with usleep, verified with calls to
- // nanotime. Compare against the runtime/metrics package's view
- // (based on nanotime) rather than runtime/pprof's view (based
- // on cputicks).
- t.Errorf("runtime/metrics reported less than the known minimum contention duration (%fs < %fs)", have, want)
- }
+ t.Run("metric", func(t *testing.T) {
+ // The runtime/metrics view may be sampled at 1 per
+ // gTrackingPeriod, so we don't have a hard lower bound here.
+ testenv.SkipFlaky(t, 64253)
+
+ if have, want := metricGrowth, delay.Seconds()*float64(len(mus)); have < want {
+ // The test imposes a delay with usleep, verified with calls to
+ // nanotime. Compare against the runtime/metrics package's view
+ // (based on nanotime) rather than runtime/pprof's view (based
+ // on cputicks).
+ t.Errorf("runtime/metrics reported less than the known minimum contention duration (%fs < %fs)", have, want)
+ }
+ })
if have, want := n, int64(len(mus)); have != want {
t.Errorf("mutex profile reported contention count different from the known true count (%d != %d)", have, want)
}
@@ -1331,3 +1345,38 @@ func TestCPUStats(t *testing.T) {
t.Error("idle time is zero")
}
}
+
+func TestMetricHeapUnusedLargeObjectOverflow(t *testing.T) {
+ // This test makes sure /memory/classes/heap/unused:bytes
+ // doesn't overflow when allocating and deallocating large
+ // objects. It is a regression test for #67019.
+ done := make(chan struct{})
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ for range 10 {
+ abi.Escape(make([]byte, 1<<20))
+ }
+ runtime.GC()
+ select {
+ case <-done:
+ return
+ default:
+ }
+ }
+ }()
+ s := []metrics.Sample{
+ {Name: "/memory/classes/heap/unused:bytes"},
+ }
+ for range 1000 {
+ metrics.Read(s)
+ if s[0].Value.Uint64() > 1<<40 {
+ t.Errorf("overflow")
+ break
+ }
+ }
+ done <- struct{}{}
+ wg.Wait()
+}
diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go
index 9dcafb427f..78313fb74c 100644
--- a/src/runtime/mfinal.go
+++ b/src/runtime/mfinal.go
@@ -377,9 +377,11 @@ func blockUntilEmptyFinalizerQueue(timeout int64) bool {
// In order to use finalizers correctly, the program must ensure that
// the object is reachable until it is no longer required.
// Objects stored in global variables, or that can be found by tracing
-// pointers from a global variable, are reachable. For other objects,
-// pass the object to a call of the [KeepAlive] function to mark the
-// last point in the function where the object must be reachable.
+// pointers from a global variable, are reachable. A function argument or
+// receiver may become unreachable at the last point where the function
+// mentions it. To make an unreachable object reachable, pass the object
+// to a call of the [KeepAlive] function to mark the last point in the
+// function where the object must be reachable.
//
// For example, if p points to a struct, such as os.File, that contains
// a file descriptor d, and p has a finalizer that closes that file
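The reworded paragraph above calls out that an argument or receiver may become unreachable at its last mention inside a function. A minimal sketch of the pattern the documentation points at, adapted from the surrounding SetFinalizer example (the File type and helper here are illustrative, not new API):

package fileexample

import (
	"runtime"
	"syscall"
)

type File struct{ d int }

// openAndRead shows why KeepAlive is needed: after the load of p.d, p itself
// is no longer mentioned, so without the KeepAlive call the finalizer could
// close d while syscall.Read is still using it.
func openAndRead(path string, buf []byte) (int, error) {
	d, err := syscall.Open(path, syscall.O_RDONLY, 0)
	if err != nil {
		return 0, err
	}
	p := &File{d}
	runtime.SetFinalizer(p, func(p *File) { syscall.Close(p.d) })
	n, err := syscall.Read(p.d, buf)
	runtime.KeepAlive(p) // p must stay reachable until Read returns
	return n, err
}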
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 83afd55c47..2654c69658 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -215,6 +215,17 @@ var gcphase uint32
// If you change it, you must change builtin/runtime.go, too.
// If you change the first four bytes, you must also change the write
// barrier insertion code.
+//
+// writeBarrier should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+// - github.com/cloudwego/frugal
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname writeBarrier
var writeBarrier struct {
enabled bool // compiler emits a check of this before calling write barrier
pad [3]byte // compiler uses 32-bit load for "enabled" field
@@ -1510,10 +1521,6 @@ func gcMarkWorkAvailable(p *p) bool {
// All gcWork caches must be empty.
// STW is in effect at this point.
func gcMark(startTime int64) {
- if debug.allocfreetrace > 0 {
- tracegc()
- }
-
if gcphase != _GCmarktermination {
throw("in gcMark expecting to see gcphase as _GCmarktermination")
}
@@ -1697,6 +1704,15 @@ var poolcleanup func()
var boringCaches []unsafe.Pointer // for crypto/internal/boring
var uniqueMapCleanup chan struct{} // for unique
+// sync_runtime_registerPoolCleanup should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/gopkg
+// - github.com/songzhibin97/gkit
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup
func sync_runtime_registerPoolCleanup(f func()) {
poolcleanup = f
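For context on the linkname pair above: the registration hook is consumed by package sync, which installs its cleanup function once at init time. The sketch below is a simplified rendering of that arrangement, not a verbatim copy of sync/pool.go:

package sync

// Implemented in the runtime; reached through the go:linkname directive above.
func runtime_registerPoolCleanup(cleanup func())

func init() {
	runtime_registerPoolCleanup(poolCleanup)
}

// poolCleanup is called by the runtime with the world stopped at the start of
// each GC cycle, so it must not allocate and must not acquire locks.
func poolCleanup() {
	// Drop cached Pool contents so the collector can reclaim them.
	// (The real implementation walks allPools and oldPools here.)
}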
diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go
index 8102940a7c..4f0bd9c28d 100644
--- a/src/runtime/mgcscavenge.go
+++ b/src/runtime/mgcscavenge.go
@@ -773,8 +773,6 @@ func (p *pageAlloc) scavengeOne(ci chunkIdx, searchIdx uint, max uintptr) uintpt
unlock(p.mheapLock)
if !p.test {
- pageTraceScav(getg().m.p.ptr(), 0, addr, uintptr(npages))
-
// Only perform sys* operations if we're not in a test.
// It's dangerous to do so otherwise.
sysUnused(unsafe.Pointer(addr), uintptr(npages)*pageSize)
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index 5670b1b8d5..f53330a5b9 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -608,16 +608,19 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
spanHasNoSpecials(s)
}
- if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled || asanenabled {
- // Find all newly freed objects. This doesn't have to
- // efficient; allocfreetrace has massive overhead.
+ if traceAllocFreeEnabled() || debug.clobberfree != 0 || raceenabled || msanenabled || asanenabled {
+ // Find all newly freed objects.
mbits := s.markBitsForBase()
abits := s.allocBitsForIndex(0)
for i := uintptr(0); i < uintptr(s.nelems); i++ {
if !mbits.isMarked() && (abits.index < uintptr(s.freeindex) || abits.isMarked()) {
x := s.base() + i*s.elemsize
- if debug.allocfreetrace != 0 {
- tracefree(unsafe.Pointer(x), size)
+ if traceAllocFreeEnabled() {
+ trace := traceAcquire()
+ if trace.ok() {
+ trace.HeapObjectFree(x)
+ traceRelease(trace)
+ }
}
if debug.clobberfree != 0 {
clobberfree(unsafe.Pointer(x), size)
@@ -782,6 +785,19 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
if nfreed != 0 {
// Free large object span to heap.
+ // Count the free in the consistent, external stats.
+ //
+ // Do this before freeSpan, which might update heapStats' inHeap
+ // value. If it does so, then metrics that subtract object footprint
+ // from inHeap might overflow. See #67019.
+ stats := memstats.heapStats.acquire()
+ atomic.Xadd64(&stats.largeFreeCount, 1)
+ atomic.Xadd64(&stats.largeFree, int64(size))
+ memstats.heapStats.release()
+
+ // Count the free in the inconsistent, internal stats.
+ gcController.totalFree.Add(int64(size))
+
// NOTE(rsc,dvyukov): The original implementation of efence
// in CL 22060046 used sysFree instead of sysFault, so that
// the operating system would eventually give the memory
@@ -814,16 +830,6 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
// invalid pointer. See arena.go:(*mheap).allocUserArenaChunk.
*(*uintptr)(unsafe.Pointer(&s.largeType)) = 0
}
-
- // Count the free in the consistent, external stats.
- stats := memstats.heapStats.acquire()
- atomic.Xadd64(&stats.largeFreeCount, 1)
- atomic.Xadd64(&stats.largeFree, int64(size))
- memstats.heapStats.release()
-
- // Count the free in the inconsistent, internal stats.
- gcController.totalFree.Add(int64(size))
-
return true
}
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index a68f855cab..35fd08af50 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -1366,7 +1366,14 @@ HaveSpan:
}
memstats.heapStats.release()
- pageTraceAlloc(pp, now, base, npages)
+ // Trace the span alloc.
+ if traceAllocFreeEnabled() {
+ trace := traceTryAcquire()
+ if trace.ok() {
+ trace.SpanAlloc(s)
+ traceRelease(trace)
+ }
+ }
return s
}
@@ -1547,7 +1554,14 @@ func (h *mheap) grow(npage uintptr) (uintptr, bool) {
// Free the span back into the heap.
func (h *mheap) freeSpan(s *mspan) {
systemstack(func() {
- pageTraceFree(getg().m.p.ptr(), 0, s.base(), s.npages)
+ // Trace the span free.
+ if traceAllocFreeEnabled() {
+ trace := traceTryAcquire()
+ if trace.ok() {
+ trace.SpanFree(s)
+ traceRelease(trace)
+ }
+ }
lock(&h.lock)
if msanenabled {
@@ -1579,7 +1593,14 @@ func (h *mheap) freeSpan(s *mspan) {
//
//go:systemstack
func (h *mheap) freeManual(s *mspan, typ spanAllocType) {
- pageTraceFree(getg().m.p.ptr(), 0, s.base(), s.npages)
+ // Trace the span free.
+ if traceAllocFreeEnabled() {
+ trace := traceTryAcquire()
+ if trace.ok() {
+ trace.SpanFree(s)
+ traceRelease(trace)
+ }
+ }
s.needzero = 1
lock(&h.lock)
diff --git a/src/runtime/mklockrank.go b/src/runtime/mklockrank.go
index 2b4b5e99cd..1239b4a546 100644
--- a/src/runtime/mklockrank.go
+++ b/src/runtime/mklockrank.go
@@ -125,6 +125,7 @@ allg,
< fin,
spanSetSpine,
mspanSpecial,
+ traceTypeTab,
MPROF;
# We can acquire gcBitsArenas for pinner bits, and
diff --git a/src/runtime/mksizeclasses.go b/src/runtime/mksizeclasses.go
index 26ca49e6eb..bb06ba1edd 100644
--- a/src/runtime/mksizeclasses.go
+++ b/src/runtime/mksizeclasses.go
@@ -75,6 +75,7 @@ func main() {
const (
// Constants that we use and will transfer to the runtime.
+ minHeapAlign = 8
maxSmallSize = 32 << 10
smallSizeDiv = 8
smallSizeMax = 1024
@@ -99,7 +100,7 @@ func makeClasses() []class {
classes = append(classes, class{}) // class #0 is a dummy entry
- align := 8
+ align := minHeapAlign
for size := align; size <= maxSmallSize; size += align {
if powerOfTwo(size) { // bump alignment once in a while
if size >= 2048 {
@@ -288,6 +289,7 @@ func maxObjsPerSpan(classes []class) int {
func printClasses(w io.Writer, classes []class) {
fmt.Fprintln(w, "const (")
+ fmt.Fprintf(w, "minHeapAlign = %d\n", minHeapAlign)
fmt.Fprintf(w, "_MaxSmallSize = %d\n", maxSmallSize)
fmt.Fprintf(w, "smallSizeDiv = %d\n", smallSizeDiv)
fmt.Fprintf(w, "smallSizeMax = %d\n", smallSizeMax)
diff --git a/src/runtime/mpallocbits.go b/src/runtime/mpallocbits.go
index 9f447557c6..d8a9d25789 100644
--- a/src/runtime/mpallocbits.go
+++ b/src/runtime/mpallocbits.go
@@ -324,7 +324,6 @@ func (b *pallocBits) findLargeN(npages uintptr, searchIdx uint) (uint, uint) {
}
s := uint(sys.TrailingZeros64(x))
if s+size >= uint(npages) {
- size += s
return start, newSearchIdx
}
if s < 64 {
diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go
index 87eed8d1dd..b51a1ad3ce 100644
--- a/src/runtime/mprof.go
+++ b/src/runtime/mprof.go
@@ -9,6 +9,7 @@ package runtime
import (
"internal/abi"
+ "internal/profilerecord"
"internal/runtime/atomic"
"runtime/internal/sys"
"unsafe"
@@ -39,11 +40,20 @@ const (
// size of bucket hash table
buckHashSize = 179999
- // maxStack is the max depth of stack to record in bucket.
- // Note that it's only used internally as a guard against
- // wildly out-of-bounds slicing of the PCs that come after
- // a bucket struct, and it could increase in the future.
- maxStack = 32
+ // maxSkip is to account for deferred inline expansion
+ // when using frame pointer unwinding. We record the stack
+ // with "physical" frame pointers but handle skipping "logical"
+ // frames at some point after collecting the stack. So
+ // we need extra space in order to avoid getting fewer than the
+ // desired maximum number of frames after expansion.
+ // This should be at least as large as the largest skip value
+ // used for profiling; otherwise stacks may be truncated inconsistently
+ maxSkip = 5
+
+ // maxProfStackDepth is the highest valid value for debug.profstackdepth.
+ // It's used for the bucket.stk func.
+ // TODO(fg): can we get rid of this?
+ maxProfStackDepth = 1024
)
type bucketType int
@@ -231,10 +241,11 @@ func newBucket(typ bucketType, nstk int) *bucket {
return b
}
-// stk returns the slice in b holding the stack.
+// stk returns the slice in b holding the stack. The caller can assume that the
+// backing array is immutable.
func (b *bucket) stk() []uintptr {
- stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
- if b.nstk > maxStack {
+ stk := (*[maxProfStackDepth]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
+ if b.nstk > maxProfStackDepth {
// prove that slicing works; otherwise a failure requires a P
throw("bad profile stack count")
}
@@ -422,15 +433,22 @@ func mProf_PostSweep() {
}
// Called by malloc to record a profiled block.
-func mProf_Malloc(p unsafe.Pointer, size uintptr) {
- var stk [maxStack]uintptr
- nstk := callers(4, stk[:])
-
+func mProf_Malloc(mp *m, p unsafe.Pointer, size uintptr) {
+ if mp.profStack == nil {
+ // mp.profStack is nil if we happen to sample an allocation during the
+ // initialization of mp. This case is rare, so we just ignore such
+ // allocations. Change MemProfileRate to 1 if you need to reproduce such
+ // cases for testing purposes.
+ return
+ }
+ // Only use the part of mp.profStack we need and ignore the extra space
+ // reserved for delayed inline expansion with frame pointer unwinding.
+ nstk := callers(4, mp.profStack[:debug.profstackdepth])
index := (mProfCycle.read() + 2) % uint32(len(memRecord{}.future))
- b := stkbucket(memProfile, size, stk[:nstk], true)
- mp := b.mp()
- mpc := &mp.future[index]
+ b := stkbucket(memProfile, size, mp.profStack[:nstk], true)
+ mr := b.mp()
+ mpc := &mr.future[index]
lock(&profMemFutureLock[index])
mpc.allocs++
@@ -504,17 +522,54 @@ func blocksampled(cycles, rate int64) bool {
return true
}
+// saveblockevent records a profile event of the type specified by which.
+// cycles is the quantity associated with this event and rate is the sampling rate,
+// used to adjust the cycles value in the manner determined by the profile type.
+// skip is the number of frames to omit from the traceback associated with the event.
+// The traceback will be recorded from the stack of the goroutine associated with the current m.
+// skip should be positive if this event is recorded from the current stack
+// (e.g. when this is not called from a system stack)
func saveblockevent(cycles, rate int64, skip int, which bucketType) {
+ if debug.profstackdepth == 0 {
+ // profstackdepth is set to 0 by the user, so mp.profStack is nil and we
+ // can't record a stack trace.
+ return
+ }
+ if skip > maxSkip {
+ print("requested skip=", skip)
+ throw("invalid skip value")
+ }
gp := getg()
- var nstk int
- var stk [maxStack]uintptr
- if gp.m.curg == nil || gp.m.curg == gp {
- nstk = callers(skip, stk[:])
+ mp := acquirem() // we must not be preempted while accessing profstack
+
+ nstk := 1
+ if tracefpunwindoff() || gp.m.hasCgoOnStack() {
+ mp.profStack[0] = logicalStackSentinel
+ if gp.m.curg == nil || gp.m.curg == gp {
+ nstk = callers(skip, mp.profStack[1:])
+ } else {
+ nstk = gcallers(gp.m.curg, skip, mp.profStack[1:])
+ }
} else {
- nstk = gcallers(gp.m.curg, skip, stk[:])
+ mp.profStack[0] = uintptr(skip)
+ if gp.m.curg == nil || gp.m.curg == gp {
+ if skip > 0 {
+ // We skip one fewer frame than the provided value for frame
+ // pointer unwinding because the skip value includes the current
+ // frame, whereas the saved frame pointer will give us the
+ // caller's return address first (so, not including
+ // saveblockevent)
+ mp.profStack[0] -= 1
+ }
+ nstk += fpTracebackPCs(unsafe.Pointer(getfp()), mp.profStack[1:])
+ } else {
+ mp.profStack[1] = gp.m.curg.sched.pc
+ nstk += 1 + fpTracebackPCs(unsafe.Pointer(gp.m.curg.sched.bp), mp.profStack[2:])
+ }
}
- saveBlockEventStack(cycles, rate, stk[:nstk], which)
+ saveBlockEventStack(cycles, rate, mp.profStack[:nstk], which)
+ releasem(mp)
}
// lockTimer assists with profiling contention on runtime-internal locks.
@@ -613,12 +668,12 @@ func (lt *lockTimer) end() {
}
type mLockProfile struct {
- waitTime atomic.Int64 // total nanoseconds spent waiting in runtime.lockWithRank
- stack [maxStack]uintptr // stack that experienced contention in runtime.lockWithRank
- pending uintptr // *mutex that experienced contention (to be traceback-ed)
- cycles int64 // cycles attributable to "pending" (if set), otherwise to "stack"
- cyclesLost int64 // contention for which we weren't able to record a call stack
- disabled bool // attribute all time to "lost"
+ waitTime atomic.Int64 // total nanoseconds spent waiting in runtime.lockWithRank
+ stack []uintptr // stack that experienced contention in runtime.lockWithRank
+ pending uintptr // *mutex that experienced contention (to be traceback-ed)
+ cycles int64 // cycles attributable to "pending" (if set), otherwise to "stack"
+ cyclesLost int64 // contention for which we weren't able to record a call stack
+ disabled bool // attribute all time to "lost"
}
func (prof *mLockProfile) recordLock(cycles int64, l *mutex) {
@@ -675,6 +730,12 @@ func (prof *mLockProfile) recordUnlock(l *mutex) {
}
func (prof *mLockProfile) captureStack() {
+ if debug.profstackdepth == 0 {
+ // profstackdepth is set to 0 by the user, so mp.profStack is nil and we
+ // can't record a stack trace.
+ return
+ }
+
skip := 3 // runtime.(*mLockProfile).recordUnlock runtime.unlock2 runtime.unlockWithRank
if staticLockRanking {
// When static lock ranking is enabled, we'll always be on the system
@@ -690,9 +751,10 @@ func (prof *mLockProfile) captureStack() {
}
prof.pending = 0
+ prof.stack[0] = logicalStackSentinel
if debug.runtimeContentionStacks.Load() == 0 {
- prof.stack[0] = abi.FuncPCABIInternal(_LostContendedRuntimeLock) + sys.PCQuantum
- prof.stack[1] = 0
+ prof.stack[1] = abi.FuncPCABIInternal(_LostContendedRuntimeLock) + sys.PCQuantum
+ prof.stack[2] = 0
return
}
@@ -703,7 +765,7 @@ func (prof *mLockProfile) captureStack() {
systemstack(func() {
var u unwinder
u.initAt(pc, sp, 0, gp, unwindSilentErrors|unwindJumpStack)
- nstk = tracebackPCs(&u, skip, prof.stack[:])
+ nstk = 1 + tracebackPCs(&u, skip, prof.stack[1:])
})
if nstk < len(prof.stack) {
prof.stack[nstk] = 0
@@ -718,7 +780,7 @@ func (prof *mLockProfile) store() {
mp := acquirem()
prof.disabled = true
- nstk := maxStack
+ nstk := int(debug.profstackdepth)
for i := 0; i < nstk; i++ {
if pc := prof.stack[i]; pc == 0 {
nstk = i
@@ -733,6 +795,7 @@ func (prof *mLockProfile) store() {
saveBlockEventStack(cycles, rate, prof.stack[:nstk], mutexProfile)
if lost > 0 {
lostStk := [...]uintptr{
+ logicalStackSentinel,
abi.FuncPCABIInternal(_LostContendedRuntimeLock) + sys.PCQuantum,
}
saveBlockEventStack(lost, rate, lostStk[:], mutexProfile)
@@ -883,6 +946,16 @@ func (r *MemProfileRecord) Stack() []uintptr {
// the testing package's -test.memprofile flag instead
// of calling MemProfile directly.
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
+ return memProfileInternal(len(p), inuseZero, func(r profilerecord.MemProfileRecord) {
+ copyMemProfileRecord(&p[0], r)
+ p = p[1:]
+ })
+}
+
+// memProfileInternal returns the number of records n in the profile. If there
+// are less than size records, copyFn is invoked for each record, and ok returns
+// true.
+func memProfileInternal(size int, inuseZero bool, copyFn func(profilerecord.MemProfileRecord)) (n int, ok bool) {
cycle := mProfCycle.read()
// If we're between mProf_NextCycle and mProf_Flush, take care
// of flushing to the active profile so we only have to look
@@ -922,14 +995,19 @@ func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
}
}
}
- if n <= len(p) {
+ if n <= size {
ok = true
- idx := 0
for b := head; b != nil; b = b.allnext {
mp := b.mp()
if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
- record(&p[idx], b)
- idx++
+ r := profilerecord.MemProfileRecord{
+ AllocBytes: int64(mp.active.alloc_bytes),
+ FreeBytes: int64(mp.active.free_bytes),
+ AllocObjects: int64(mp.active.allocs),
+ FreeObjects: int64(mp.active.frees),
+ Stack: b.stk(),
+ }
+ copyFn(r)
}
}
}
@@ -937,24 +1015,30 @@ func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
return
}
-// Write b's data to r.
-func record(r *MemProfileRecord, b *bucket) {
- mp := b.mp()
- r.AllocBytes = int64(mp.active.alloc_bytes)
- r.FreeBytes = int64(mp.active.free_bytes)
- r.AllocObjects = int64(mp.active.allocs)
- r.FreeObjects = int64(mp.active.frees)
+func copyMemProfileRecord(dst *MemProfileRecord, src profilerecord.MemProfileRecord) {
+ dst.AllocBytes = src.AllocBytes
+ dst.FreeBytes = src.FreeBytes
+ dst.AllocObjects = src.AllocObjects
+ dst.FreeObjects = src.FreeObjects
if raceenabled {
- racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(MemProfile))
+ racewriterangepc(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0), getcallerpc(), abi.FuncPCABIInternal(MemProfile))
}
if msanenabled {
- msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
+ msanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
}
if asanenabled {
- asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
+ asanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
}
- copy(r.Stack0[:], b.stk())
- clear(r.Stack0[b.nstk:])
+ i := copy(dst.Stack0[:], src.Stack)
+ clear(dst.Stack0[i:])
+}
+
+//go:linkname pprof_memProfileInternal
+func pprof_memProfileInternal(p []profilerecord.MemProfileRecord, inuseZero bool) (n int, ok bool) {
+ return memProfileInternal(len(p), inuseZero, func(r profilerecord.MemProfileRecord) {
+ p[0] = r
+ p = p[1:]
+ })
}
func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
@@ -983,41 +1067,66 @@ type BlockProfileRecord struct {
// the [testing] package's -test.blockprofile flag instead
// of calling BlockProfile directly.
func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
+ return blockProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
+ copyBlockProfileRecord(&p[0], r)
+ p = p[1:]
+ })
+}
+
+// blockProfileInternal returns the number of records n in the profile. If there
+// are less than size records, copyFn is invoked for each record, and ok returns
+// true.
+func blockProfileInternal(size int, copyFn func(profilerecord.BlockProfileRecord)) (n int, ok bool) {
lock(&profBlockLock)
head := (*bucket)(bbuckets.Load())
for b := head; b != nil; b = b.allnext {
n++
}
- if n <= len(p) {
+ if n <= size {
ok = true
for b := head; b != nil; b = b.allnext {
bp := b.bp()
- r := &p[0]
- r.Count = int64(bp.count)
+ r := profilerecord.BlockProfileRecord{
+ Count: int64(bp.count),
+ Cycles: bp.cycles,
+ Stack: b.stk(),
+ }
// Prevent callers from having to worry about division by zero errors.
// See discussion on http://golang.org/cl/299991.
if r.Count == 0 {
r.Count = 1
}
- r.Cycles = bp.cycles
- if raceenabled {
- racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(BlockProfile))
- }
- if msanenabled {
- msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
- }
- if asanenabled {
- asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
- }
- i := copy(r.Stack0[:], b.stk())
- clear(r.Stack0[i:])
- p = p[1:]
+ copyFn(r)
}
}
unlock(&profBlockLock)
return
}
+func copyBlockProfileRecord(dst *BlockProfileRecord, src profilerecord.BlockProfileRecord) {
+ dst.Count = src.Count
+ dst.Cycles = src.Cycles
+ if raceenabled {
+ racewriterangepc(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0), getcallerpc(), abi.FuncPCABIInternal(BlockProfile))
+ }
+ if msanenabled {
+ msanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
+ }
+ if asanenabled {
+ asanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
+ }
+ i := fpunwindExpand(dst.Stack0[:], src.Stack)
+ clear(dst.Stack0[i:])
+}
+
+//go:linkname pprof_blockProfileInternal
+func pprof_blockProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool) {
+ return blockProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
+ p[0] = r
+ p = p[1:]
+ })
+}
+
// MutexProfile returns n, the number of records in the current mutex profile.
// If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
// Otherwise, MutexProfile does not change p, and returns n, false.
@@ -1025,27 +1134,45 @@ func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
// Most clients should use the [runtime/pprof] package
// instead of calling MutexProfile directly.
func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
+ return mutexProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
+ copyBlockProfileRecord(&p[0], r)
+ p = p[1:]
+ })
+}
+
+// mutexProfileInternal returns the number of records n in the profile. If there
+// are less than size records, copyFn is invoked for each record, and ok returns
+// true.
+func mutexProfileInternal(size int, copyFn func(profilerecord.BlockProfileRecord)) (n int, ok bool) {
lock(&profBlockLock)
head := (*bucket)(xbuckets.Load())
for b := head; b != nil; b = b.allnext {
n++
}
- if n <= len(p) {
+ if n <= size {
ok = true
for b := head; b != nil; b = b.allnext {
bp := b.bp()
- r := &p[0]
- r.Count = int64(bp.count)
- r.Cycles = bp.cycles
- i := copy(r.Stack0[:], b.stk())
- clear(r.Stack0[i:])
- p = p[1:]
+ r := profilerecord.BlockProfileRecord{
+ Count: int64(bp.count),
+ Cycles: bp.cycles,
+ Stack: b.stk(),
+ }
+ copyFn(r)
}
}
unlock(&profBlockLock)
return
}
+//go:linkname pprof_mutexProfileInternal
+func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool) {
+ return mutexProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
+ p[0] = r
+ p = p[1:]
+ })
+}
+
// ThreadCreateProfile returns n, the number of records in the thread creation profile.
// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
@@ -1053,28 +1180,45 @@ func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
// Most clients should use the runtime/pprof package instead
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
+ return threadCreateProfileInternal(len(p), func(r profilerecord.StackRecord) {
+ copy(p[0].Stack0[:], r.Stack)
+ p = p[1:]
+ })
+}
+
+// threadCreateProfileInternal returns the number of records n in the profile.
+// If there are less than size records, copyFn is invoked for each record, and
+// ok returns true.
+func threadCreateProfileInternal(size int, copyFn func(profilerecord.StackRecord)) (n int, ok bool) {
first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
for mp := first; mp != nil; mp = mp.alllink {
n++
}
- if n <= len(p) {
+ if n <= size {
ok = true
- i := 0
for mp := first; mp != nil; mp = mp.alllink {
- p[i].Stack0 = mp.createstack
- i++
+ r := profilerecord.StackRecord{Stack: mp.createstack[:]}
+ copyFn(r)
}
}
return
}
-//go:linkname runtime_goroutineProfileWithLabels runtime/pprof.runtime_goroutineProfileWithLabels
-func runtime_goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
+//go:linkname pprof_threadCreateInternal
+func pprof_threadCreateInternal(p []profilerecord.StackRecord) (n int, ok bool) {
+ return threadCreateProfileInternal(len(p), func(r profilerecord.StackRecord) {
+ p[0] = r
+ p = p[1:]
+ })
+}
+
+//go:linkname pprof_goroutineProfileWithLabels
+func pprof_goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
return goroutineProfileWithLabels(p, labels)
}
// labels may be nil. If labels is non-nil, it must have the same length as p.
-func goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
+func goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
if labels != nil && len(labels) != len(p) {
labels = nil
}
@@ -1086,7 +1230,7 @@ var goroutineProfile = struct {
sema uint32
active bool
offset atomic.Int64
- records []StackRecord
+ records []profilerecord.StackRecord
labels []unsafe.Pointer
}{
sema: 1,
@@ -1125,7 +1269,7 @@ func (p *goroutineProfileStateHolder) CompareAndSwap(old, new goroutineProfileSt
return (*atomic.Uint32)(p).CompareAndSwap(uint32(old), uint32(new))
}
-func goroutineProfileWithLabelsConcurrent(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
+func goroutineProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
if len(p) == 0 {
// An empty slice is obviously too small. Return a rough
// allocation estimate without bothering to STW. As long as
@@ -1138,6 +1282,7 @@ func goroutineProfileWithLabelsConcurrent(p []StackRecord, labels []unsafe.Point
ourg := getg()
+ pcbuf := makeProfStack() // see saveg() for explanation
stw := stopTheWorld(stwGoroutineProfile)
// Using gcount while the world is stopped should give us a consistent view
// of the number of live goroutines, minus the number of goroutines that are
@@ -1164,7 +1309,7 @@ func goroutineProfileWithLabelsConcurrent(p []StackRecord, labels []unsafe.Point
sp := getcallersp()
pc := getcallerpc()
systemstack(func() {
- saveg(pc, sp, ourg, &p[0])
+ saveg(pc, sp, ourg, &p[0], pcbuf)
})
if labels != nil {
labels[0] = ourg.labels
@@ -1186,7 +1331,7 @@ func goroutineProfileWithLabelsConcurrent(p []StackRecord, labels []unsafe.Point
if fing != nil {
fing.goroutineProfiled.Store(goroutineProfileSatisfied)
if readgstatus(fing) != _Gdead && !isSystemGoroutine(fing, false) {
- doRecordGoroutineProfile(fing)
+ doRecordGoroutineProfile(fing, pcbuf)
}
}
startTheWorld(stw)
@@ -1203,7 +1348,7 @@ func goroutineProfileWithLabelsConcurrent(p []StackRecord, labels []unsafe.Point
// call will start by adding itself to the profile (before the act of
// executing can cause any changes in its stack).
forEachGRace(func(gp1 *g) {
- tryRecordGoroutineProfile(gp1, Gosched)
+ tryRecordGoroutineProfile(gp1, pcbuf, Gosched)
})
stw = stopTheWorld(stwGoroutineProfileCleanup)
@@ -1247,13 +1392,13 @@ func tryRecordGoroutineProfileWB(gp1 *g) {
if getg().m.p.ptr() == nil {
throw("no P available, write barriers are forbidden")
}
- tryRecordGoroutineProfile(gp1, osyield)
+ tryRecordGoroutineProfile(gp1, nil, osyield)
}
// tryRecordGoroutineProfile ensures that gp1 has the appropriate representation
// in the current goroutine profile: either that it should not be profiled, or
// that a snapshot of its call stack and labels are now in the profile.
-func tryRecordGoroutineProfile(gp1 *g, yield func()) {
+func tryRecordGoroutineProfile(gp1 *g, pcbuf []uintptr, yield func()) {
if readgstatus(gp1) == _Gdead {
// Dead goroutines should not appear in the profile. Goroutines that
// start while profile collection is active will get goroutineProfiled
@@ -1288,7 +1433,7 @@ func tryRecordGoroutineProfile(gp1 *g, yield func()) {
// in this limbo.
mp := acquirem()
if gp1.goroutineProfiled.CompareAndSwap(goroutineProfileAbsent, goroutineProfileInProgress) {
- doRecordGoroutineProfile(gp1)
+ doRecordGoroutineProfile(gp1, pcbuf)
gp1.goroutineProfiled.Store(goroutineProfileSatisfied)
}
releasem(mp)
@@ -1302,7 +1447,7 @@ func tryRecordGoroutineProfile(gp1 *g, yield func()) {
// goroutine that is coordinating the goroutine profile (running on its own
// stack), or from the scheduler in preparation to execute gp1 (running on the
// system stack).
-func doRecordGoroutineProfile(gp1 *g) {
+func doRecordGoroutineProfile(gp1 *g, pcbuf []uintptr) {
if readgstatus(gp1) == _Grunning {
print("doRecordGoroutineProfile gp1=", gp1.goid, "\n")
throw("cannot read stack of running goroutine")
@@ -1325,14 +1470,14 @@ func doRecordGoroutineProfile(gp1 *g) {
// set gp1.goroutineProfiled to goroutineProfileInProgress and so are still
// preventing it from being truly _Grunnable. So we'll use the system stack
// to avoid schedule delays.
- systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &goroutineProfile.records[offset]) })
+ systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &goroutineProfile.records[offset], pcbuf) })
if goroutineProfile.labels != nil {
goroutineProfile.labels[offset] = gp1.labels
}
}
-func goroutineProfileWithLabelsSync(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
+func goroutineProfileWithLabelsSync(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
gp := getg()
isOK := func(gp1 *g) bool {
@@ -1341,6 +1486,7 @@ func goroutineProfileWithLabelsSync(p []StackRecord, labels []unsafe.Pointer) (n
return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1, false)
}
+ pcbuf := makeProfStack() // see saveg() for explanation
stw := stopTheWorld(stwGoroutineProfile)
// World is stopped, no locking required.
@@ -1359,7 +1505,7 @@ func goroutineProfileWithLabelsSync(p []StackRecord, labels []unsafe.Pointer) (n
sp := getcallersp()
pc := getcallerpc()
systemstack(func() {
- saveg(pc, sp, gp, &r[0])
+ saveg(pc, sp, gp, &r[0], pcbuf)
})
r = r[1:]
@@ -1384,7 +1530,7 @@ func goroutineProfileWithLabelsSync(p []StackRecord, labels []unsafe.Pointer) (n
// The world is stopped, so it cannot use cgocall (which will be
// blocked at exitsyscall). Do it on the system stack so it won't
// call into the scheduler (see traceback.go:cgoContextPCs).
- systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &r[0]) })
+ systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &r[0], pcbuf) })
if labels != nil {
lbl[0] = gp1.labels
lbl = lbl[1:]
@@ -1408,17 +1554,41 @@ func goroutineProfileWithLabelsSync(p []StackRecord, labels []unsafe.Pointer) (n
// Most clients should use the [runtime/pprof] package instead
// of calling GoroutineProfile directly.
func GoroutineProfile(p []StackRecord) (n int, ok bool) {
+ records := make([]profilerecord.StackRecord, len(p))
+ n, ok = goroutineProfileInternal(records)
+ if !ok {
+ return
+ }
+ for i, mr := range records[0:n] {
+ copy(p[i].Stack0[:], mr.Stack)
+ }
+ return
+}
+func goroutineProfileInternal(p []profilerecord.StackRecord) (n int, ok bool) {
return goroutineProfileWithLabels(p, nil)
}
-func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
+func saveg(pc, sp uintptr, gp *g, r *profilerecord.StackRecord, pcbuf []uintptr) {
+ // To reduce memory usage, we want to allocate a r.Stack that is just big
+ // enough to hold gp's stack trace. Naively we might achieve this by
+ // recording our stack trace into mp.profStack, and then allocating a
+ // r.Stack of the right size. However, mp.profStack is also used for
+ // allocation profiling, so it could get overwritten if the slice allocation
+ // gets profiled. So instead we record the stack trace into a temporary
+ // pcbuf which is usually given to us by our caller. When it's not, we have
+ // to allocate one here. This will only happen for goroutines that were in a
+ // syscall when the goroutine profile started or for goroutines that manage
+ // to execute before we finish iterating over all the goroutines.
+ if pcbuf == nil {
+ pcbuf = makeProfStack()
+ }
+
var u unwinder
u.initAt(pc, sp, 0, gp, unwindSilentErrors)
- n := tracebackPCs(&u, 0, r.Stack0[:])
- if n < len(r.Stack0) {
- r.Stack0[n] = 0
- }
+ n := tracebackPCs(&u, 0, pcbuf)
+ r.Stack = make([]uintptr, n)
+ copy(r.Stack, pcbuf)
}
// Stack formats a stack trace of the calling goroutine into buf
@@ -1459,61 +1629,3 @@ func Stack(buf []byte, all bool) int {
}
return n
}
-
-// Tracing of alloc/free/gc.
-
-var tracelock mutex
-
-func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
- lock(&tracelock)
- gp := getg()
- gp.m.traceback = 2
- if typ == nil {
- print("tracealloc(", p, ", ", hex(size), ")\n")
- } else {
- print("tracealloc(", p, ", ", hex(size), ", ", toRType(typ).string(), ")\n")
- }
- if gp.m.curg == nil || gp == gp.m.curg {
- goroutineheader(gp)
- pc := getcallerpc()
- sp := getcallersp()
- systemstack(func() {
- traceback(pc, sp, 0, gp)
- })
- } else {
- goroutineheader(gp.m.curg)
- traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
- }
- print("\n")
- gp.m.traceback = 0
- unlock(&tracelock)
-}
-
-func tracefree(p unsafe.Pointer, size uintptr) {
- lock(&tracelock)
- gp := getg()
- gp.m.traceback = 2
- print("tracefree(", p, ", ", hex(size), ")\n")
- goroutineheader(gp)
- pc := getcallerpc()
- sp := getcallersp()
- systemstack(func() {
- traceback(pc, sp, 0, gp)
- })
- print("\n")
- gp.m.traceback = 0
- unlock(&tracelock)
-}
-
-func tracegc() {
- lock(&tracelock)
- gp := getg()
- gp.m.traceback = 2
- print("tracegc()\n")
- // running on m->g0 stack; show all non-g0 goroutines
- tracebackothers(gp)
- print("end tracegc\n")
- print("\n")
- gp.m.traceback = 0
- unlock(&tracelock)
-}
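The mprof.go changes above split the public MemProfile, BlockProfile, MutexProfile, ThreadCreateProfile, and GoroutineProfile entry points from new *Internal variants that runtime/pprof reaches through the pprof_* linknames; the public two-pass protocol is unchanged. For reference, a sketch of that documented call pattern (the +50 headroom is the usual idiom, not something this CL prescribes):

package profdump

import "runtime"

// readMemProfile calls MemProfile once for a size estimate, then grows the
// slice, with some headroom for records created in the meantime, until the
// copy succeeds.
func readMemProfile(inuseZero bool) []runtime.MemProfileRecord {
	n, _ := runtime.MemProfile(nil, inuseZero)
	for {
		p := make([]runtime.MemProfileRecord, n+50)
		var ok bool
		n, ok = runtime.MemProfile(p, inuseZero)
		if ok {
			return p[:n]
		}
	}
}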
diff --git a/src/runtime/netpoll.go b/src/runtime/netpoll.go
index bbfef80aec..7b37d91b24 100644
--- a/src/runtime/netpoll.go
+++ b/src/runtime/netpoll.go
@@ -207,6 +207,9 @@ var (
netpollWaiters atomic.Uint32
)
+// netpollWaiters is accessed in tests
+//go:linkname netpollWaiters
+
//go:linkname poll_runtime_pollServerInit internal/poll.runtime_pollServerInit
func poll_runtime_pollServerInit() {
netpollGenericInit()
diff --git a/src/runtime/netpoll_kqueue_event.go b/src/runtime/netpoll_kqueue_event.go
index 6419656414..d5f783e607 100644
--- a/src/runtime/netpoll_kqueue_event.go
+++ b/src/runtime/netpoll_kqueue_event.go
@@ -12,7 +12,7 @@ package runtime
// get printed somehow and they search for it.
const kqIdent = 0xee1eb9f4
-func addWakeupEvent(_ int32) {
+func addWakeupEvent(kq int32) {
ev := keventt{
ident: kqIdent,
filter: _EVFILT_USER,
diff --git a/src/runtime/os3_plan9.go b/src/runtime/os3_plan9.go
index 8c9cbe28ec..dd15705618 100644
--- a/src/runtime/os3_plan9.go
+++ b/src/runtime/os3_plan9.go
@@ -7,6 +7,7 @@ package runtime
import (
"internal/abi"
"internal/goarch"
+ "internal/stringslite"
"unsafe"
)
@@ -47,7 +48,7 @@ func sighandler(_ureg *ureg, note *byte, gp *g) int {
// level by the program but will otherwise be ignored.
flags = _SigNotify
for sig, t = range sigtable {
- if hasPrefix(notestr, t.name) {
+ if stringslite.HasPrefix(notestr, t.name) {
flags = t.flags
break
}
diff --git a/src/runtime/os_plan9.go b/src/runtime/os_plan9.go
index 5e355f1664..2dbb42ad03 100644
--- a/src/runtime/os_plan9.go
+++ b/src/runtime/os_plan9.go
@@ -7,6 +7,7 @@ package runtime
import (
"internal/abi"
"internal/runtime/atomic"
+ "internal/stringslite"
"unsafe"
)
@@ -124,7 +125,7 @@ func indexNoFloat(s, t string) int {
return 0
}
for i := 0; i < len(s); i++ {
- if s[i] == t[0] && hasPrefix(s[i:], t) {
+ if s[i] == t[0] && stringslite.HasPrefix(s[i:], t) {
return i
}
}
@@ -132,20 +133,20 @@ func indexNoFloat(s, t string) int {
}
func atolwhex(p string) int64 {
- for hasPrefix(p, " ") || hasPrefix(p, "\t") {
+ for stringslite.HasPrefix(p, " ") || stringslite.HasPrefix(p, "\t") {
p = p[1:]
}
neg := false
- if hasPrefix(p, "-") || hasPrefix(p, "+") {
+ if stringslite.HasPrefix(p, "-") || stringslite.HasPrefix(p, "+") {
neg = p[0] == '-'
p = p[1:]
- for hasPrefix(p, " ") || hasPrefix(p, "\t") {
+ for stringslite.HasPrefix(p, " ") || stringslite.HasPrefix(p, "\t") {
p = p[1:]
}
}
var n int64
switch {
- case hasPrefix(p, "0x"), hasPrefix(p, "0X"):
+ case stringslite.HasPrefix(p, "0x"), stringslite.HasPrefix(p, "0X"):
p = p[2:]
for ; len(p) > 0; p = p[1:] {
if '0' <= p[0] && p[0] <= '9' {
@@ -158,7 +159,7 @@ func atolwhex(p string) int64 {
break
}
}
- case hasPrefix(p, "0"):
+ case stringslite.HasPrefix(p, "0"):
for ; len(p) > 0 && '0' <= p[0] && p[0] <= '7'; p = p[1:] {
n = n*8 + int64(p[0]-'0')
}
diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go
index d65e0c91f4..4aabc29644 100644
--- a/src/runtime/os_windows.go
+++ b/src/runtime/os_windows.go
@@ -43,6 +43,7 @@ const (
//go:cgo_import_dynamic runtime._LoadLibraryW LoadLibraryW%1 "kernel32.dll"
//go:cgo_import_dynamic runtime._PostQueuedCompletionStatus PostQueuedCompletionStatus%4 "kernel32.dll"
//go:cgo_import_dynamic runtime._QueryPerformanceCounter QueryPerformanceCounter%1 "kernel32.dll"
+//go:cgo_import_dynamic runtime._QueryPerformanceFrequency QueryPerformanceFrequency%1 "kernel32.dll"
//go:cgo_import_dynamic runtime._RaiseFailFastException RaiseFailFastException%3 "kernel32.dll"
//go:cgo_import_dynamic runtime._ResumeThread ResumeThread%1 "kernel32.dll"
//go:cgo_import_dynamic runtime._RtlLookupFunctionEntry RtlLookupFunctionEntry%3 "kernel32.dll"
@@ -100,6 +101,7 @@ var (
_LoadLibraryW,
_PostQueuedCompletionStatus,
_QueryPerformanceCounter,
+ _QueryPerformanceFrequency,
_RaiseFailFastException,
_ResumeThread,
_RtlLookupFunctionEntry,
@@ -246,6 +248,20 @@ func windowsLoadSystemLib(name []uint16) uintptr {
return stdcall3(_LoadLibraryExW, uintptr(unsafe.Pointer(&name[0])), 0, _LOAD_LIBRARY_SEARCH_SYSTEM32)
}
+//go:linkname windows_QueryPerformanceCounter internal/syscall/windows.QueryPerformanceCounter
+func windows_QueryPerformanceCounter() int64 {
+ var counter int64
+ stdcall1(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
+ return counter
+}
+
+//go:linkname windows_QueryPerformanceFrequency internal/syscall/windows.QueryPerformanceFrequency
+func windows_QueryPerformanceFrequency() int64 {
+ var frequency int64
+ stdcall1(_QueryPerformanceFrequency, uintptr(unsafe.Pointer(&frequency)))
+ return frequency
+}
+
func loadOptionalSyscalls() {
bcryptPrimitives := windowsLoadSystemLib(bcryptprimitivesdll[:])
if bcryptPrimitives == 0 {
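The two new linknamed helpers above expose the raw performance counter and its frequency to internal/syscall/windows. A hedged sketch of how a caller on that side might turn the pair into elapsed time (the elapsed helper is illustrative, not part of the patch):

package sketch

import "time"

// elapsed converts two QueryPerformanceCounter readings into a duration,
// given the QueryPerformanceFrequency value in ticks per second.
// Illustrative only; it ignores overflow for very long intervals.
func elapsed(start, end, freq int64) time.Duration {
	return time.Duration((end - start) * int64(time.Second) / freq)
}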
diff --git a/src/runtime/pagetrace_off.go b/src/runtime/pagetrace_off.go
deleted file mode 100644
index 10b44d40ce..0000000000
--- a/src/runtime/pagetrace_off.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.pagetrace
-
-package runtime
-
-//go:systemstack
-func pageTraceAlloc(pp *p, now int64, base, npages uintptr) {
-}
-
-//go:systemstack
-func pageTraceFree(pp *p, now int64, base, npages uintptr) {
-}
-
-//go:systemstack
-func pageTraceScav(pp *p, now int64, base, npages uintptr) {
-}
-
-type pageTraceBuf struct {
-}
-
-func initPageTrace(env string) {
-}
-
-func finishPageTrace() {
-}
diff --git a/src/runtime/pagetrace_on.go b/src/runtime/pagetrace_on.go
deleted file mode 100644
index f82521caad..0000000000
--- a/src/runtime/pagetrace_on.go
+++ /dev/null
@@ -1,358 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build goexperiment.pagetrace
-
-// Page tracer.
-//
-// This file contains an implementation of page trace instrumentation for tracking
-// the way the Go runtime manages pages of memory. The trace may be enabled at program
-// startup with the GODEBUG option pagetrace.
-//
-// Each page trace event is either 8 or 16 bytes wide. The first
-// 8 bytes follow this format for non-sync events:
-//
-// [16 timestamp delta][35 base address][10 npages][1 isLarge][2 pageTraceEventType]
-//
-// If the "large" bit is set then the event is 16 bytes wide with the second 8 byte word
-// containing the full npages value (the npages bitfield is 0).
-//
-// The base address's bottom pageShift bits are always zero hence why we can pack other
-// data in there. We ignore the top 16 bits, assuming a 48 bit address space for the
-// heap.
-//
-// The timestamp delta is computed from the difference between the current nanotime
-// timestamp and the last sync event's timestamp. The bottom pageTraceTimeLostBits of
-// this delta is removed and only the next pageTraceTimeDeltaBits are kept.
-//
-// A sync event is emitted at the beginning of each trace buffer and whenever the
-// timestamp delta would not fit in an event.
-//
-// Sync events have the following structure:
-//
-// [61 timestamp or P ID][1 isPID][2 pageTraceSyncEvent]
-//
-// In essence, the "large" bit repurposed to indicate whether it's a timestamp or a P ID
-// (these are typically uint32). Note that we only have 61 bits for the 64-bit timestamp,
-// but like for the delta we drop the bottom pageTraceTimeLostBits here as well.
-
-package runtime
-
-import (
- "runtime/internal/sys"
- "unsafe"
-)
-
-// pageTraceAlloc records a page trace allocation event.
-// pp may be nil. Call only if debug.pagetracefd != 0.
-//
-// Must run on the system stack as a crude way to prevent preemption.
-//
-//go:systemstack
-func pageTraceAlloc(pp *p, now int64, base, npages uintptr) {
- if pageTrace.enabled {
- if now == 0 {
- now = nanotime()
- }
- pageTraceEmit(pp, now, base, npages, pageTraceAllocEvent)
- }
-}
-
-// pageTraceFree records a page trace free event.
-// pp may be nil. Call only if debug.pagetracefd != 0.
-//
-// Must run on the system stack as a crude way to prevent preemption.
-//
-//go:systemstack
-func pageTraceFree(pp *p, now int64, base, npages uintptr) {
- if pageTrace.enabled {
- if now == 0 {
- now = nanotime()
- }
- pageTraceEmit(pp, now, base, npages, pageTraceFreeEvent)
- }
-}
-
-// pageTraceScav records a page trace scavenge event.
-// pp may be nil. Call only if debug.pagetracefd != 0.
-//
-// Must run on the system stack as a crude way to prevent preemption.
-//
-//go:systemstack
-func pageTraceScav(pp *p, now int64, base, npages uintptr) {
- if pageTrace.enabled {
- if now == 0 {
- now = nanotime()
- }
- pageTraceEmit(pp, now, base, npages, pageTraceScavEvent)
- }
-}
-
-// pageTraceEventType is a page trace event type.
-type pageTraceEventType uint8
-
-const (
- pageTraceSyncEvent pageTraceEventType = iota // Timestamp emission.
- pageTraceAllocEvent // Allocation of pages.
- pageTraceFreeEvent // Freeing pages.
- pageTraceScavEvent // Scavenging pages.
-)
-
-// pageTraceEmit emits a page trace event.
-//
-// Must run on the system stack as a crude way to prevent preemption.
-//
-//go:systemstack
-func pageTraceEmit(pp *p, now int64, base, npages uintptr, typ pageTraceEventType) {
- // Get a buffer.
- var tbp *pageTraceBuf
- pid := int32(-1)
- if pp == nil {
- // We have no P, so take the global buffer.
- lock(&pageTrace.lock)
- tbp = &pageTrace.buf
- } else {
- tbp = &pp.pageTraceBuf
- pid = pp.id
- }
-
- // Initialize the buffer if necessary.
- tb := *tbp
- if tb.buf == nil {
- tb.buf = (*pageTraceEvents)(sysAlloc(pageTraceBufSize, &memstats.other_sys))
- tb = tb.writePid(pid)
- }
-
- // Handle timestamp and emit a sync event if necessary.
- if now < tb.timeBase {
- now = tb.timeBase
- }
- if now-tb.timeBase >= pageTraceTimeMaxDelta {
- tb.timeBase = now
- tb = tb.writeSync(pid)
- }
-
- // Emit the event.
- tb = tb.writeEvent(pid, now, base, npages, typ)
-
- // Write back the buffer.
- *tbp = tb
- if pp == nil {
- unlock(&pageTrace.lock)
- }
-}
-
-const (
- pageTraceBufSize = 32 << 10
-
- // These constants describe the per-event timestamp delta encoding.
- pageTraceTimeLostBits = 7 // How many bits of precision we lose in the delta.
- pageTraceTimeDeltaBits = 16 // Size of the delta in bits.
- pageTraceTimeMaxDelta = 1 << (pageTraceTimeLostBits + pageTraceTimeDeltaBits)
-)
-
-// pageTraceEvents is the low-level buffer containing the trace data.
-type pageTraceEvents struct {
- _ sys.NotInHeap
- events [pageTraceBufSize / 8]uint64
-}
-
-// pageTraceBuf is a wrapper around pageTraceEvents that knows how to write events
-// to the buffer. It tracks state necessary to do so.
-type pageTraceBuf struct {
- buf *pageTraceEvents
- len int // How many events have been written so far.
- timeBase int64 // The current timestamp base from which deltas are produced.
- finished bool // Whether this trace buf should no longer flush anything out.
-}
-
-// writePid writes a P ID event indicating which P we're running on.
-//
-// Assumes there's always space in the buffer since this is only called at the
-// beginning of a new buffer.
-//
-// Must run on the system stack as a crude way to prevent preemption.
-//
-//go:systemstack
-func (tb pageTraceBuf) writePid(pid int32) pageTraceBuf {
- e := uint64(int64(pid))<<3 | 0b100 | uint64(pageTraceSyncEvent)
- tb.buf.events[tb.len] = e
- tb.len++
- return tb
-}
-
-// writeSync writes a sync event, which is just a timestamp. Handles flushing.
-//
-// Must run on the system stack as a crude way to prevent preemption.
-//
-//go:systemstack
-func (tb pageTraceBuf) writeSync(pid int32) pageTraceBuf {
- if tb.len+1 > len(tb.buf.events) {
- // N.B. flush will writeSync again.
- return tb.flush(pid, tb.timeBase)
- }
- e := ((uint64(tb.timeBase) >> pageTraceTimeLostBits) << 3) | uint64(pageTraceSyncEvent)
- tb.buf.events[tb.len] = e
- tb.len++
- return tb
-}
-
-// writeEvent handles writing all non-sync and non-pid events. Handles flushing if necessary.
-//
-// pid indicates the P we're currently running on. Necessary in case we need to flush.
-// now is the current nanotime timestamp.
-// base is the base address of whatever group of pages this event is happening to.
-// npages is the length of the group of pages this event is happening to.
-// typ is the event that's happening to these pages.
-//
-// Must run on the system stack as a crude way to prevent preemption.
-//
-//go:systemstack
-func (tb pageTraceBuf) writeEvent(pid int32, now int64, base, npages uintptr, typ pageTraceEventType) pageTraceBuf {
- large := 0
- np := npages
- if npages >= 1024 {
- large = 1
- np = 0
- }
- if tb.len+1+large > len(tb.buf.events) {
- tb = tb.flush(pid, now)
- }
- if base%pageSize != 0 {
- throw("base address not page aligned")
- }
- e := uint64(base)
- // The pageShift low-order bits are zero.
- e |= uint64(typ) // 2 bits
- e |= uint64(large) << 2 // 1 bit
- e |= uint64(np) << 3 // 10 bits
- // Write the timestamp delta in the upper pageTraceTimeDeltaBits.
- e |= uint64((now-tb.timeBase)>>pageTraceTimeLostBits) << (64 - pageTraceTimeDeltaBits)
- tb.buf.events[tb.len] = e
- if large != 0 {
- // npages doesn't fit in 10 bits, so write an additional word with that data.
- tb.buf.events[tb.len+1] = uint64(npages)
- }
- tb.len += 1 + large
- return tb
-}
-
-// flush writes out the contents of the buffer to pageTrace.fd and resets the buffer.
-// It then writes out a P ID event and the first sync event for the new buffer.
-//
-// Must run on the system stack as a crude way to prevent preemption.
-//
-//go:systemstack
-func (tb pageTraceBuf) flush(pid int32, now int64) pageTraceBuf {
- if !tb.finished {
- lock(&pageTrace.fdLock)
- writeFull(uintptr(pageTrace.fd), (*byte)(unsafe.Pointer(&tb.buf.events[0])), tb.len*8)
- unlock(&pageTrace.fdLock)
- }
- tb.len = 0
- tb.timeBase = now
- return tb.writePid(pid).writeSync(pid)
-}
-
-var pageTrace struct {
- // enabled indicates whether tracing is enabled. If true, fd >= 0.
- //
- // Safe to read without synchronization because it's only set once
- // at program initialization.
- enabled bool
-
- // buf is the page trace buffer used if there is no P.
- //
- // lock protects buf.
- lock mutex
- buf pageTraceBuf
-
- // fdLock protects writing to fd.
- //
- // fd is the file to write the page trace to.
- fdLock mutex
- fd int32
-}
-
-// initPageTrace initializes the page tracing infrastructure from GODEBUG.
-//
-// env must be the value of the GODEBUG environment variable.
-func initPageTrace(env string) {
- var value string
- for env != "" {
- elt, rest := env, ""
- for i := 0; i < len(env); i++ {
- if env[i] == ',' {
- elt, rest = env[:i], env[i+1:]
- break
- }
- }
- env = rest
- if hasPrefix(elt, "pagetrace=") {
- value = elt[len("pagetrace="):]
- break
- }
- }
- pageTrace.fd = -1
- if canCreateFile && value != "" {
- var tmp [4096]byte
- if len(value) != 0 && len(value) < 4096 {
- copy(tmp[:], value)
- pageTrace.fd = create(&tmp[0], 0o664)
- }
- }
- pageTrace.enabled = pageTrace.fd >= 0
-}
-
-// finishPageTrace flushes all P's trace buffers and disables page tracing.
-func finishPageTrace() {
- if !pageTrace.enabled {
- return
- }
- // Grab worldsema as we're about to execute a ragged barrier.
- semacquire(&worldsema)
- systemstack(func() {
- // Disable tracing. This isn't strictly necessary and it's best-effort.
- pageTrace.enabled = false
-
- // Execute a ragged barrier, flushing each trace buffer.
- forEachP(waitReasonPageTraceFlush, func(pp *p) {
- if pp.pageTraceBuf.buf != nil {
- pp.pageTraceBuf = pp.pageTraceBuf.flush(pp.id, nanotime())
- }
- pp.pageTraceBuf.finished = true
- })
-
- // Write the global have-no-P buffer.
- lock(&pageTrace.lock)
- if pageTrace.buf.buf != nil {
- pageTrace.buf = pageTrace.buf.flush(-1, nanotime())
- }
- pageTrace.buf.finished = true
- unlock(&pageTrace.lock)
-
- // Safely close the file as nothing else should be allowed to write to the fd.
- lock(&pageTrace.fdLock)
- closefd(pageTrace.fd)
- pageTrace.fd = -1
- unlock(&pageTrace.fdLock)
- })
- semrelease(&worldsema)
-}
-
-// writeFull ensures that a complete write of bn bytes from b is made to fd.
-func writeFull(fd uintptr, b *byte, bn int) {
- for bn > 0 {
- n := write(fd, unsafe.Pointer(b), int32(bn))
- if n == -_EINTR || n == -_EAGAIN {
- continue
- }
- if n < 0 {
- print("errno=", -n, "\n")
- throw("writeBytes: bad write")
- }
- bn -= int(n)
- b = addb(b, uintptr(n))
- }
-}
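For reviewers checking what the deleted page tracer recorded: its header comment describes a non-sync event word as [16 timestamp delta][35 base address][10 npages][1 isLarge][2 type]. A hedged decoding sketch of that layout, assuming the usual 8 KiB runtime page size (pageShift = 13) and a 48-bit heap address space; the function is illustrative and not taken from the removed file:

package sketch

// decodeEvent unpacks the non-sync event word described in the deleted
// pagetrace_on.go header comment. Sketch only.
func decodeEvent(e uint64) (typ uint8, large bool, npages, base, tsDelta uint64) {
	typ = uint8(e & 0b11)       // 2-bit event type
	large = (e>>2)&1 == 1       // 1-bit "large" flag
	npages = (e >> 3) & 0x3FF   // 10-bit page count (0 when the large flag is set)
	base = e &^ ((1 << 13) - 1) // clear the reused low pageShift bits
	base &= (1 << 48) - 1       // drop the timestamp bits above the 48-bit address
	tsDelta = e >> (64 - 16)    // 16-bit timestamp delta in the top bits
	return
}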
diff --git a/src/runtime/panic.go b/src/runtime/panic.go
index 51b57520c1..98e96b12bf 100644
--- a/src/runtime/panic.go
+++ b/src/runtime/panic.go
@@ -8,6 +8,7 @@ import (
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
+ "internal/stringslite"
"runtime/internal/sys"
"unsafe"
)
@@ -53,7 +54,7 @@ const (
// pc should be the program counter of the compiler-generated code that
// triggered this panic.
func panicCheck1(pc uintptr, msg string) {
- if goarch.IsWasm == 0 && hasPrefix(funcname(findfunc(pc)), "runtime.") {
+ if goarch.IsWasm == 0 && stringslite.HasPrefix(funcname(findfunc(pc)), "runtime.") {
// Note: wasm can't tail call, so we can't get the original caller's pc.
throw(msg)
}
@@ -296,11 +297,24 @@ func deferproc(fn func()) {
// been set and must not be clobbered.
}
-var rangeExitError = error(errorString("range function continued iteration after exit"))
+var rangeDoneError = error(errorString("range function continued iteration after function for loop body returned false"))
+var rangePanicError = error(errorString("range function continued iteration after loop body panic"))
+var rangeExhaustedError = error(errorString("range function continued iteration after whole loop exit"))
+var rangeMissingPanicError = error(errorString("range function recovered a loop body panic and did not resume panicking"))
//go:noinline
-func panicrangeexit() {
- panic(rangeExitError)
+func panicrangestate(state int) {
+ switch abi.RF_State(state) {
+ case abi.RF_DONE:
+ panic(rangeDoneError)
+ case abi.RF_PANIC:
+ panic(rangePanicError)
+ case abi.RF_EXHAUSTED:
+ panic(rangeExhaustedError)
+ case abi.RF_MISSING_PANIC:
+ panic(rangeMissingPanicError)
+ }
+ throw("unexpected state passed to panicrangestate")
}
// deferrangefunc is called by functions that are about to
@@ -656,7 +670,7 @@ func printpanics(p *_panic) {
return
}
print("panic: ")
- printany(p.arg)
+ printpanicval(p.arg)
if p.recovered {
print(" [recovered]")
}
@@ -706,6 +720,18 @@ func (*PanicNilError) RuntimeError() {}
var panicnil = &godebugInc{name: "panicnil"}
// The implementation of the predeclared function panic.
+// The compiler emits calls to this function.
+//
+// gopanic should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - go.undefinedlabs.com/scopeagent
+// - github.com/goplus/igop
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname gopanic
func gopanic(e any) {
if e == nil {
if debug.panicnil.Load() != 1 {
@@ -718,20 +744,20 @@ func gopanic(e any) {
gp := getg()
if gp.m.curg != gp {
print("panic: ")
- printany(e)
+ printpanicval(e)
print("\n")
throw("panic on system stack")
}
if gp.m.mallocing != 0 {
print("panic: ")
- printany(e)
+ printpanicval(e)
print("\n")
throw("panic during malloc")
}
if gp.m.preemptoff != "" {
print("panic: ")
- printany(e)
+ printpanicval(e)
print("\n")
print("preempt off reason: ")
print(gp.m.preemptoff)
@@ -740,7 +766,7 @@ func gopanic(e any) {
}
if gp.m.locks != 0 {
print("panic: ")
- printany(e)
+ printpanicval(e)
print("\n")
throw("panic holding locks")
}
@@ -1010,12 +1036,32 @@ func sync_fatal(s string) {
// throw should be used for runtime-internal fatal errors where Go itself,
// rather than user code, may be at fault for the failure.
//
+// NOTE: temporarily marked "go:noinline" pending investigation/fix of
+// issue #67274, so as to fix longtest builders.
+//
+// throw should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+// - github.com/cockroachdb/pebble
+// - github.com/dgraph-io/ristretto
+// - github.com/outcaste-io/ristretto
+// - github.com/pingcap/br
+// - gvisor.dev/gvisor
+// - github.com/sagernet/gvisor
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname throw
//go:nosplit
func throw(s string) {
// Everything throw does should be recursively nosplit so it
// can be called even when it's unsafe to grow the stack.
systemstack(func() {
- print("fatal error: ", s, "\n")
+ print("fatal error: ")
+ printindented(s) // logically printpanicval(s), but avoids convTstring write barrier
+ print("\n")
})
fatalthrow(throwTypeRuntime)
@@ -1034,7 +1080,9 @@ func fatal(s string) {
// Everything fatal does should be recursively nosplit so it
// can be called even when it's unsafe to grow the stack.
systemstack(func() {
- print("fatal error: ", s, "\n")
+ print("fatal error: ")
+ printindented(s) // logically printpanicval(s), but avoids convTstring write barrier
+ print("\n")
})
fatalthrow(throwTypeUser)
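The panicrangestate switch added earlier in this file gives each range-over-func misuse its own message. A hedged illustration of the kind of user code that reaches the rangeDoneError path once range-over-function iterators are enabled; the iterator below is deliberately broken and is not part of the patch:

package sketch

// badSeq keeps calling yield even after yield returns false. With
// range-over-func, the compiler-inserted state check makes the next call
// panic: "range function continued iteration after function for loop body
// returned false".
func badSeq(yield func(int) bool) {
	for i := 0; i < 3; i++ {
		yield(i) // a correct iterator must stop as soon as yield returns false
	}
}

func trigger() {
	for v := range badSeq {
		_ = v
		break // tells the iterator to stop; badSeq ignores that and panics
	}
}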
diff --git a/src/runtime/panic_test.go b/src/runtime/panic_test.go
index b8a300f6b1..994abfdd45 100644
--- a/src/runtime/panic_test.go
+++ b/src/runtime/panic_test.go
@@ -27,7 +27,7 @@ func TestPanicWithDirectlyPrintableCustomTypes(t *testing.T) {
{"panicCustomInt16", `panic: main.MyInt16(93)`},
{"panicCustomInt32", `panic: main.MyInt32(93)`},
{"panicCustomInt64", `panic: main.MyInt64(93)`},
- {"panicCustomString", `panic: main.MyString("Panic")`},
+ {"panicCustomString", `panic: main.MyString("Panic` + "\n\t" + `line two")`},
{"panicCustomUint", `panic: main.MyUint(93)`},
{"panicCustomUint8", `panic: main.MyUint8(93)`},
{"panicCustomUint16", `panic: main.MyUint16(93)`},
diff --git a/src/runtime/pprof/label.go b/src/runtime/pprof/label.go
index 3684ae34e5..41eece2f74 100644
--- a/src/runtime/pprof/label.go
+++ b/src/runtime/pprof/label.go
@@ -7,7 +7,7 @@ package pprof
import (
"context"
"fmt"
- "sort"
+ "slices"
"strings"
)
@@ -49,7 +49,7 @@ func (l *labelMap) String() string {
keyVals = append(keyVals, fmt.Sprintf("%q:%q", k, v))
}
- sort.Strings(keyVals)
+ slices.Sort(keyVals)
return "{" + strings.Join(keyVals, ", ") + "}"
}
diff --git a/src/runtime/pprof/pprof.go b/src/runtime/pprof/pprof.go
index e352b39caf..be17e59875 100644
--- a/src/runtime/pprof/pprof.go
+++ b/src/runtime/pprof/pprof.go
@@ -74,10 +74,13 @@ package pprof
import (
"bufio"
+ "cmp"
"fmt"
"internal/abi"
+ "internal/profilerecord"
"io"
"runtime"
+ "slices"
"sort"
"strings"
"sync"
@@ -278,7 +281,9 @@ func Profiles() []*Profile {
all = append(all, p)
}
- sort.Slice(all, func(i, j int) bool { return all[i].name < all[j].name })
+ slices.SortFunc(all, func(a, b *Profile) int {
+ return strings.Compare(a.name, b.name)
+ })
return all
}
@@ -378,15 +383,7 @@ func (p *Profile) WriteTo(w io.Writer, debug int) error {
p.mu.Unlock()
// Map order is non-deterministic; make output deterministic.
- sort.Slice(all, func(i, j int) bool {
- t, u := all[i], all[j]
- for k := 0; k < len(t) && k < len(u); k++ {
- if t[k] != u[k] {
- return t[k] < u[k]
- }
- }
- return len(t) < len(u)
- })
+ slices.SortFunc(all, slices.Compare)
return printCountProfile(w, debug, p.name, stackProfile(all))
}
@@ -411,7 +408,7 @@ type countProfile interface {
// as the pprof-proto format output. Translations from cycle count to time duration
// are done because The proto expects count and time (nanoseconds) instead of count
// and the number of cycles for block, contention profiles.
-func printCountCycleProfile(w io.Writer, countName, cycleName string, records []runtime.BlockProfileRecord) error {
+func printCountCycleProfile(w io.Writer, countName, cycleName string, records []profilerecord.BlockProfileRecord) error {
// Output profile in protobuf form.
b := newProfileBuilder(w)
b.pbValueType(tagProfile_PeriodType, countName, "count")
@@ -419,16 +416,18 @@ func printCountCycleProfile(w io.Writer, countName, cycleName string, records []
b.pbValueType(tagProfile_SampleType, countName, "count")
b.pbValueType(tagProfile_SampleType, cycleName, "nanoseconds")
- cpuGHz := float64(runtime_cyclesPerSecond()) / 1e9
+ cpuGHz := float64(pprof_cyclesPerSecond()) / 1e9
values := []int64{0, 0}
var locs []uint64
+ expandedStack := pprof_makeProfStack()
for _, r := range records {
values[0] = r.Count
values[1] = int64(float64(r.Cycles) / cpuGHz)
// For count profiles, all stack addresses are
// return PCs, which is what appendLocsForStack expects.
- locs = b.appendLocsForStack(locs[:0], r.Stack())
+ n := pprof_fpunwindExpand(expandedStack[:], r.Stack)
+ locs = b.appendLocsForStack(locs[:0], expandedStack[:n])
b.pbSample(values, locs, nil)
}
b.build()
@@ -593,14 +592,14 @@ func writeHeapInternal(w io.Writer, debug int, defaultSampleType string) error {
// the two calls—so allocate a few extra records for safety
// and also try again if we're very unlucky.
// The loop should only execute one iteration in the common case.
- var p []runtime.MemProfileRecord
- n, ok := runtime.MemProfile(nil, true)
+ var p []profilerecord.MemProfileRecord
+ n, ok := pprof_memProfileInternal(nil, true)
for {
// Allocate room for a slightly bigger profile,
// in case a few more entries have been added
// since the call to MemProfile.
- p = make([]runtime.MemProfileRecord, n+50)
- n, ok = runtime.MemProfile(p, true)
+ p = make([]profilerecord.MemProfileRecord, n+50)
+ n, ok = pprof_memProfileInternal(p, true)
if ok {
p = p[0:n]
break
@@ -612,7 +611,9 @@ func writeHeapInternal(w io.Writer, debug int, defaultSampleType string) error {
return writeHeapProto(w, p, int64(runtime.MemProfileRate), defaultSampleType)
}
- sort.Slice(p, func(i, j int) bool { return p[i].InUseBytes() > p[j].InUseBytes() })
+ slices.SortFunc(p, func(a, b profilerecord.MemProfileRecord) int {
+ return cmp.Compare(a.InUseBytes(), b.InUseBytes())
+ })
b := bufio.NewWriter(w)
tw := tabwriter.NewWriter(b, 1, 8, 1, '\t', 0)
@@ -654,11 +655,11 @@ func writeHeapInternal(w io.Writer, debug int, defaultSampleType string) error {
fmt.Fprintf(w, "%d: %d [%d: %d] @",
r.InUseObjects(), r.InUseBytes(),
r.AllocObjects, r.AllocBytes)
- for _, pc := range r.Stack() {
+ for _, pc := range r.Stack {
fmt.Fprintf(w, " %#x", pc)
}
fmt.Fprintf(w, "\n")
- printStackRecord(w, r.Stack(), false)
+ printStackRecord(w, r.Stack, false)
}
// Print memstats information too.
@@ -713,8 +714,8 @@ func writeThreadCreate(w io.Writer, debug int) error {
// Until https://golang.org/issues/6104 is addressed, wrap
// ThreadCreateProfile because there's no point in tracking labels when we
// don't get any stack-traces.
- return writeRuntimeProfile(w, debug, "threadcreate", func(p []runtime.StackRecord, _ []unsafe.Pointer) (n int, ok bool) {
- return runtime.ThreadCreateProfile(p)
+ return writeRuntimeProfile(w, debug, "threadcreate", func(p []profilerecord.StackRecord, _ []unsafe.Pointer) (n int, ok bool) {
+ return pprof_threadCreateInternal(p)
})
}
@@ -723,15 +724,12 @@ func countGoroutine() int {
return runtime.NumGoroutine()
}
-// runtime_goroutineProfileWithLabels is defined in runtime/mprof.go
-func runtime_goroutineProfileWithLabels(p []runtime.StackRecord, labels []unsafe.Pointer) (n int, ok bool)
-
// writeGoroutine writes the current runtime GoroutineProfile to w.
func writeGoroutine(w io.Writer, debug int) error {
if debug >= 2 {
return writeGoroutineStacks(w)
}
- return writeRuntimeProfile(w, debug, "goroutine", runtime_goroutineProfileWithLabels)
+ return writeRuntimeProfile(w, debug, "goroutine", pprof_goroutineProfileWithLabels)
}
func writeGoroutineStacks(w io.Writer) error {
@@ -755,14 +753,14 @@ func writeGoroutineStacks(w io.Writer) error {
return err
}
-func writeRuntimeProfile(w io.Writer, debug int, name string, fetch func([]runtime.StackRecord, []unsafe.Pointer) (int, bool)) error {
+func writeRuntimeProfile(w io.Writer, debug int, name string, fetch func([]profilerecord.StackRecord, []unsafe.Pointer) (int, bool)) error {
// Find out how many records there are (fetch(nil)),
// allocate that many records, and get the data.
// There's a race—more records might be added between
// the two calls—so allocate a few extra records for safety
// and also try again if we're very unlucky.
// The loop should only execute one iteration in the common case.
- var p []runtime.StackRecord
+ var p []profilerecord.StackRecord
var labels []unsafe.Pointer
n, ok := fetch(nil, nil)
@@ -770,7 +768,7 @@ func writeRuntimeProfile(w io.Writer, debug int, name string, fetch func([]runti
// Allocate room for a slightly bigger profile,
// in case a few more entries have been added
// since the call to ThreadProfile.
- p = make([]runtime.StackRecord, n+10)
+ p = make([]profilerecord.StackRecord, n+10)
labels = make([]unsafe.Pointer, n+10)
n, ok = fetch(p, labels)
if ok {
@@ -784,12 +782,12 @@ func writeRuntimeProfile(w io.Writer, debug int, name string, fetch func([]runti
}
type runtimeProfile struct {
- stk []runtime.StackRecord
+ stk []profilerecord.StackRecord
labels []unsafe.Pointer
}
func (p *runtimeProfile) Len() int { return len(p.stk) }
-func (p *runtimeProfile) Stack(i int) []uintptr { return p.stk[i].Stack() }
+func (p *runtimeProfile) Stack(i int) []uintptr { return p.stk[i].Stack }
func (p *runtimeProfile) Label(i int) *labelMap { return (*labelMap)(p.labels[i]) }
var cpu struct {
@@ -894,20 +892,20 @@ func countMutex() int {
// writeBlock writes the current blocking profile to w.
func writeBlock(w io.Writer, debug int) error {
- return writeProfileInternal(w, debug, "contention", runtime.BlockProfile)
+ return writeProfileInternal(w, debug, "contention", pprof_blockProfileInternal)
}
// writeMutex writes the current mutex profile to w.
func writeMutex(w io.Writer, debug int) error {
- return writeProfileInternal(w, debug, "mutex", runtime.MutexProfile)
+ return writeProfileInternal(w, debug, "mutex", pprof_mutexProfileInternal)
}
// writeProfileInternal writes the current blocking or mutex profile depending on the passed parameters.
-func writeProfileInternal(w io.Writer, debug int, name string, runtimeProfile func([]runtime.BlockProfileRecord) (int, bool)) error {
- var p []runtime.BlockProfileRecord
+func writeProfileInternal(w io.Writer, debug int, name string, runtimeProfile func([]profilerecord.BlockProfileRecord) (int, bool)) error {
+ var p []profilerecord.BlockProfileRecord
n, ok := runtimeProfile(nil)
for {
- p = make([]runtime.BlockProfileRecord, n+50)
+ p = make([]profilerecord.BlockProfileRecord, n+50)
n, ok = runtimeProfile(p)
if ok {
p = p[:n]
@@ -915,7 +913,9 @@ func writeProfileInternal(w io.Writer, debug int, name string, runtimeProfile fu
}
}
- sort.Slice(p, func(i, j int) bool { return p[i].Cycles > p[j].Cycles })
+ slices.SortFunc(p, func(a, b profilerecord.BlockProfileRecord) int {
+ return cmp.Compare(b.Cycles, a.Cycles)
+ })
if debug <= 0 {
return printCountCycleProfile(w, "contentions", "delay", p)
@@ -926,19 +926,22 @@ func writeProfileInternal(w io.Writer, debug int, name string, runtimeProfile fu
w = tw
fmt.Fprintf(w, "--- %v:\n", name)
- fmt.Fprintf(w, "cycles/second=%v\n", runtime_cyclesPerSecond())
+ fmt.Fprintf(w, "cycles/second=%v\n", pprof_cyclesPerSecond())
if name == "mutex" {
fmt.Fprintf(w, "sampling period=%d\n", runtime.SetMutexProfileFraction(-1))
}
+ expandedStack := pprof_makeProfStack()
for i := range p {
r := &p[i]
fmt.Fprintf(w, "%v %v @", r.Cycles, r.Count)
- for _, pc := range r.Stack() {
+ n := pprof_fpunwindExpand(expandedStack, r.Stack)
+ stack := expandedStack[:n]
+ for _, pc := range stack {
fmt.Fprintf(w, " %#x", pc)
}
fmt.Fprint(w, "\n")
if debug > 0 {
- printStackRecord(w, r.Stack(), true)
+ printStackRecord(w, stack, true)
}
}
@@ -948,4 +951,26 @@ func writeProfileInternal(w io.Writer, debug int, name string, runtimeProfile fu
return b.Flush()
}
-func runtime_cyclesPerSecond() int64
+//go:linkname pprof_goroutineProfileWithLabels runtime.pprof_goroutineProfileWithLabels
+func pprof_goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool)
+
+//go:linkname pprof_cyclesPerSecond runtime/pprof.runtime_cyclesPerSecond
+func pprof_cyclesPerSecond() int64
+
+//go:linkname pprof_memProfileInternal runtime.pprof_memProfileInternal
+func pprof_memProfileInternal(p []profilerecord.MemProfileRecord, inuseZero bool) (n int, ok bool)
+
+//go:linkname pprof_blockProfileInternal runtime.pprof_blockProfileInternal
+func pprof_blockProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool)
+
+//go:linkname pprof_mutexProfileInternal runtime.pprof_mutexProfileInternal
+func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool)
+
+//go:linkname pprof_threadCreateInternal runtime.pprof_threadCreateInternal
+func pprof_threadCreateInternal(p []profilerecord.StackRecord) (n int, ok bool)
+
+//go:linkname pprof_fpunwindExpand runtime.pprof_fpunwindExpand
+func pprof_fpunwindExpand(dst, src []uintptr) int
+
+//go:linkname pprof_makeProfStack runtime.pprof_makeProfStack
+func pprof_makeProfStack() []uintptr
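The rewritten block and mutex writers now receive collapsed frame-pointer stacks and expand them through pprof_fpunwindExpand into a scratch buffer from pprof_makeProfStack before printing. A hedged sketch of that expand-then-print step in isolation; expand stands in for the linknamed runtime helper and is not its implementation:

package sketch

import "fmt"

// printExpanded expands a collapsed frame-pointer stack into dst and prints
// the resulting program counters, mirroring the loop in writeProfileInternal.
// The expand callback models pprof_fpunwindExpand: it fills dst and reports
// how many entries were written. Sketch only.
func printExpanded(dst, src []uintptr, expand func(dst, src []uintptr) int) {
	n := expand(dst, src)
	for _, pc := range dst[:n] {
		fmt.Printf(" %#x", pc)
	}
	fmt.Println()
}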
diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go
index f57c1fed50..09abbb31ae 100644
--- a/src/runtime/pprof/pprof_test.go
+++ b/src/runtime/pprof/pprof_test.go
@@ -1272,7 +1272,7 @@ func TestMutexProfile(t *testing.T) {
if ok, err := regexp.MatchString(r3, lines[5]); err != nil || !ok {
t.Errorf("%q didn't match %q", lines[5], r3)
}
- t.Logf(prof)
+ t.Log(prof)
})
t.Run("proto", func(t *testing.T) {
// proto format
@@ -1349,14 +1349,21 @@ func TestMutexProfileRateAdjust(t *testing.T) {
}
for _, s := range p.Sample {
+ var match, runtimeInternal bool
for _, l := range s.Location {
for _, line := range l.Line {
if line.Function.Name == "runtime/pprof.blockMutex.func1" {
- contentions += s.Value[0]
- delay += s.Value[1]
+ match = true
+ }
+ if line.Function.Name == "runtime.unlock" {
+ runtimeInternal = true
}
}
}
+ if match && !runtimeInternal {
+ contentions += s.Value[0]
+ delay += s.Value[1]
+ }
}
return
}
@@ -2431,3 +2438,143 @@ func TestTimeVDSO(t *testing.T) {
}
}
}
+
+func TestProfilerStackDepth(t *testing.T) {
+ // Disable sampling, otherwise it's difficult to assert anything.
+ oldMemRate := runtime.MemProfileRate
+ runtime.MemProfileRate = 1
+ runtime.SetBlockProfileRate(1)
+ oldMutexRate := runtime.SetMutexProfileFraction(1)
+ t.Cleanup(func() {
+ runtime.MemProfileRate = oldMemRate
+ runtime.SetBlockProfileRate(0)
+ runtime.SetMutexProfileFraction(oldMutexRate)
+ })
+
+ const depth = 128
+ go produceProfileEvents(t, depth)
+ awaitBlockedGoroutine(t, "chan receive", "goroutineDeep", 1)
+
+ tests := []struct {
+ profiler string
+ prefix []string
+ }{
+ {"heap", []string{"runtime/pprof.allocDeep"}},
+ {"block", []string{"runtime.chanrecv1", "runtime/pprof.blockChanDeep"}},
+ {"mutex", []string{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexDeep"}},
+ {"goroutine", []string{"runtime.gopark", "runtime.chanrecv", "runtime.chanrecv1", "runtime/pprof.goroutineDeep"}},
+ }
+
+ for _, test := range tests {
+ t.Run(test.profiler, func(t *testing.T) {
+ var buf bytes.Buffer
+ if err := Lookup(test.profiler).WriteTo(&buf, 0); err != nil {
+ t.Fatalf("failed to write heap profile: %v", err)
+ }
+ p, err := profile.Parse(&buf)
+ if err != nil {
+ t.Fatalf("failed to parse heap profile: %v", err)
+ }
+ t.Logf("Profile = %v", p)
+
+ stks := stacks(p)
+ var stk []string
+ for _, s := range stks {
+ if hasPrefix(s, test.prefix) {
+ stk = s
+ break
+ }
+ }
+ if len(stk) != depth {
+ t.Fatalf("want stack depth = %d, got %d", depth, len(stk))
+ }
+
+ if rootFn, wantFn := stk[depth-1], "runtime/pprof.produceProfileEvents"; rootFn != wantFn {
+ t.Fatalf("want stack stack root %s, got %v", wantFn, rootFn)
+ }
+ })
+ }
+}
+
+func hasPrefix(stk []string, prefix []string) bool {
+ if len(prefix) > len(stk) {
+ return false
+ }
+ for i := range prefix {
+ if stk[i] != prefix[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// ensure that stack records are valid map keys (comparable)
+var _ = map[runtime.MemProfileRecord]struct{}{}
+var _ = map[runtime.StackRecord]struct{}{}
+
+// allocDeep calls itself n times before calling fn.
+func allocDeep(n int) {
+ if n > 1 {
+ allocDeep(n - 1)
+ return
+ }
+ memSink = make([]byte, 1<<20)
+}
+
+// blockChanDeep produces a block profile event at stack depth n, including the
+// caller.
+func blockChanDeep(t *testing.T, n int) {
+ if n > 1 {
+ blockChanDeep(t, n-1)
+ return
+ }
+ ch := make(chan struct{})
+ go func() {
+ awaitBlockedGoroutine(t, "chan receive", "blockChanDeep", 1)
+ ch <- struct{}{}
+ }()
+ <-ch
+}
+
+// blockMutexDeep produces a block profile event at stack depth n, including the
+// caller.
+func blockMutexDeep(t *testing.T, n int) {
+ if n > 1 {
+ blockMutexDeep(t, n-1)
+ return
+ }
+ var mu sync.Mutex
+ go func() {
+ mu.Lock()
+ mu.Lock()
+ }()
+ awaitBlockedGoroutine(t, "sync.Mutex.Lock", "blockMutexDeep", 1)
+ mu.Unlock()
+}
+
+// goroutineDeep blocks at stack depth n, including the caller until the test is
+// finished.
+func goroutineDeep(t *testing.T, n int) {
+ if n > 1 {
+ goroutineDeep(t, n-1)
+ return
+ }
+ wait := make(chan struct{}, 1)
+ t.Cleanup(func() {
+ wait <- struct{}{}
+ })
+ <-wait
+}
+
+// produceProfileEvents produces pprof events at the given stack depth and then
+// blocks in goroutineDeep until the test completes. The stack traces are
+// guaranteed to have exactly the desired depth with produceProfileEvents as
+// their root frame, which is expected by TestProfilerStackDepth.
+func produceProfileEvents(t *testing.T, depth int) {
+ allocDeep(depth - 1) // -1 for produceProfileEvents, **
+ blockChanDeep(t, depth-2) // -2 for produceProfileEvents, **, chanrecv1
+ blockMutexDeep(t, depth-2) // -2 for produceProfileEvents, **, Unlock
+ memSink = nil
+ runtime.GC()
+ goroutineDeep(t, depth-4) // -4 for produceProfileEvents, **, chanrecv1, chanrecv, gopark
+}
diff --git a/src/runtime/pprof/protomem.go b/src/runtime/pprof/protomem.go
index fa75a28c62..ab3550f43f 100644
--- a/src/runtime/pprof/protomem.go
+++ b/src/runtime/pprof/protomem.go
@@ -5,6 +5,7 @@
package pprof
import (
+ "internal/profilerecord"
"io"
"math"
"runtime"
@@ -12,7 +13,7 @@ import (
)
// writeHeapProto writes the current heap profile in protobuf format to w.
-func writeHeapProto(w io.Writer, p []runtime.MemProfileRecord, rate int64, defaultSampleType string) error {
+func writeHeapProto(w io.Writer, p []profilerecord.MemProfileRecord, rate int64, defaultSampleType string) error {
b := newProfileBuilder(w)
b.pbValueType(tagProfile_PeriodType, "space", "bytes")
b.pb.int64Opt(tagProfile_Period, rate)
@@ -29,7 +30,7 @@ func writeHeapProto(w io.Writer, p []runtime.MemProfileRecord, rate int64, defau
for _, r := range p {
hideRuntime := true
for tries := 0; tries < 2; tries++ {
- stk := r.Stack()
+ stk := r.Stack
// For heap profiles, all stack
// addresses are return PCs, which is
// what appendLocsForStack expects.
diff --git a/src/runtime/pprof/protomem_test.go b/src/runtime/pprof/protomem_test.go
index 5fb67c53f6..8e9732a331 100644
--- a/src/runtime/pprof/protomem_test.go
+++ b/src/runtime/pprof/protomem_test.go
@@ -8,6 +8,7 @@ import (
"bytes"
"fmt"
"internal/profile"
+ "internal/profilerecord"
"internal/testenv"
"runtime"
"slices"
@@ -24,10 +25,10 @@ func TestConvertMemProfile(t *testing.T) {
// from these and get back to addr1 and addr2.
a1, a2 := uintptr(addr1)+1, uintptr(addr2)+1
rate := int64(512 * 1024)
- rec := []runtime.MemProfileRecord{
- {AllocBytes: 4096, FreeBytes: 1024, AllocObjects: 4, FreeObjects: 1, Stack0: [32]uintptr{a1, a2}},
- {AllocBytes: 512 * 1024, FreeBytes: 0, AllocObjects: 1, FreeObjects: 0, Stack0: [32]uintptr{a2 + 1, a2 + 2}},
- {AllocBytes: 512 * 1024, FreeBytes: 512 * 1024, AllocObjects: 1, FreeObjects: 1, Stack0: [32]uintptr{a1 + 1, a1 + 2, a2 + 3}},
+ rec := []profilerecord.MemProfileRecord{
+ {AllocBytes: 4096, FreeBytes: 1024, AllocObjects: 4, FreeObjects: 1, Stack: []uintptr{a1, a2}},
+ {AllocBytes: 512 * 1024, FreeBytes: 0, AllocObjects: 1, FreeObjects: 0, Stack: []uintptr{a2 + 1, a2 + 2}},
+ {AllocBytes: 512 * 1024, FreeBytes: 512 * 1024, AllocObjects: 1, FreeObjects: 1, Stack: []uintptr{a1 + 1, a1 + 2, a2 + 3}},
}
periodType := &profile.ValueType{Type: "space", Unit: "bytes"}
diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go
index 82d85cd707..45b1b5e9c7 100644
--- a/src/runtime/preempt.go
+++ b/src/runtime/preempt.go
@@ -55,6 +55,7 @@ package runtime
import (
"internal/abi"
"internal/goarch"
+ "internal/stringslite"
)
type suspendGState struct {
@@ -416,9 +417,9 @@ func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) (bool, uintptr) {
// Check the inner-most name
u, uf := newInlineUnwinder(f, pc)
name := u.srcFunc(uf).name()
- if hasPrefix(name, "runtime.") ||
- hasPrefix(name, "runtime/internal/") ||
- hasPrefix(name, "reflect.") {
+ if stringslite.HasPrefix(name, "runtime.") ||
+ stringslite.HasPrefix(name, "runtime/internal/") ||
+ stringslite.HasPrefix(name, "reflect.") {
// For now we never async preempt the runtime or
// anything closely tied to the runtime. Known issues
// include: various points in the scheduler ("don't
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 56f97fa9f7..c4f175b0b7 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -10,6 +10,8 @@ import (
"internal/goarch"
"internal/goos"
"internal/runtime/atomic"
+ "internal/runtime/exithook"
+ "internal/stringslite"
"runtime/internal/sys"
"unsafe"
)
@@ -308,6 +310,16 @@ func os_beforeExit(exitCode int) {
}
}
+func init() {
+ exithook.Gosched = Gosched
+ exithook.Goid = func() uint64 { return getg().goid }
+ exithook.Throw = throw
+}
+
+func runExitHooks(code int) {
+ exithook.Run(code)
+}
+
// start forcegc helper goroutine
func init() {
go forcegchelper()
@@ -381,6 +393,17 @@ func goschedIfBusy() {
// Reason explains why the goroutine has been parked. It is displayed in stack
// traces and heap dumps. Reasons should be unique and descriptive. Do not
// re-use reasons, add new ones.
+//
+// gopark should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gvisor.dev/gvisor
+// - github.com/sagernet/gvisor
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname gopark
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
if reason != waitReasonSleep {
checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
@@ -407,6 +430,16 @@ func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason,
gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
}
+// goready should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gvisor.dev/gvisor
+// - github.com/sagernet/gvisor
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname goready
func goready(gp *g, traceskip int) {
systemstack(func() {
ready(gp, traceskip, true)
@@ -578,7 +611,7 @@ func switchToCrashStack(fn func()) {
// Disable crash stack on Windows for now. Apparently, throwing an exception
// on a non-system-allocated crash stack causes EXCEPTION_STACK_OVERFLOW and
// hangs the process (see issue 63938).
-const crashStackImplemented = (GOARCH == "386" || GOARCH == "amd64" || GOARCH == "arm" || GOARCH == "arm64" || GOARCH == "loong64" || GOARCH == "mips64" || GOARCH == "mips64le" || GOARCH == "ppc64" || GOARCH == "ppc64le" || GOARCH == "riscv64" || GOARCH == "s390x" || GOARCH == "wasm") && GOOS != "windows"
+const crashStackImplemented = GOOS != "windows"
//go:noescape
func switchToCrashStack0(fn func()) // in assembly
@@ -729,7 +762,7 @@ func getGodebugEarly() string {
p := argv_index(argv, argc+1+i)
s := unsafe.String(p, findnull(p))
- if hasPrefix(s, prefix) {
+ if stringslite.HasPrefix(s, prefix) {
env = gostring(p)[len(prefix):]
break
}
@@ -784,10 +817,9 @@ func schedinit() {
stackinit()
mallocinit()
godebug := getGodebugEarly()
- initPageTrace(godebug) // must run after mallocinit but before anything allocates
- cpuinit(godebug) // must run before alginit
- randinit() // must run before alginit, mcommoninit
- alginit() // maps, hash, rand must not be used before this call
+ cpuinit(godebug) // must run before alginit
+ randinit() // must run before alginit, mcommoninit
+ alginit() // maps, hash, rand must not be used before this call
mcommoninit(gp.m, -1)
modulesinit() // provides activeModules
typelinksinit() // uses maps, activeModules
@@ -818,6 +850,9 @@ func schedinit() {
MemProfileRate = 0
}
+ // mcommoninit runs before parsedebugvars, so init profstacks again.
+ mProfStackInit(gp.m)
+
lock(&sched.lock)
sched.lastpoll.Store(nanotime())
procs := ncpu
@@ -922,8 +957,43 @@ func mcommoninit(mp *m, id int64) {
if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
mp.cgoCallers = new(cgoCallers)
}
+ mProfStackInit(mp)
+}
+
+// mProfStackInit is used to eagerly initialize stack trace buffers for
+// profiling. Lazy allocation would have to deal with reentrancy issues in
+// malloc and runtime locks for mLockProfile.
+// TODO(mknyszek): Implement lazy allocation if this becomes a problem.
+func mProfStackInit(mp *m) {
+ if debug.profstackdepth == 0 {
+ // debug.profstack is set to 0 by the user, or we're being called from
+ // schedinit before parsedebugvars.
+ return
+ }
+ mp.profStack = makeProfStackFP()
+ mp.mLockProfile.stack = makeProfStackFP()
}
+// makeProfStackFP creates a buffer large enough to hold a maximum-sized stack
+// trace as well as any additional frames needed for frame pointer unwinding
+// with delayed inline expansion.
+func makeProfStackFP() []uintptr {
+ // The "1" term is to account for the first stack entry being
+ // taken up by a "skip" sentinel value for profilers which
+ // defer inline frame expansion until the profile is reported.
+ // The "maxSkip" term is for frame pointer unwinding, where we
+ // want to end up with debug.profstackdepth frames but will discard
+ // some "physical" frames to account for skipping.
+ return make([]uintptr, 1+maxSkip+debug.profstackdepth)
+}
+
+// makeProfStack returns a buffer large enough to hold a maximum-sized stack
+// trace.
+func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
+
+//go:linkname pprof_makeProfStack
+func pprof_makeProfStack() []uintptr { return makeProfStack() }
+
func (mp *m) becomeSpinning() {
mp.spinning = true
sched.nmspinning.Add(1)
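makeProfStackFP above sizes its buffer as 1 + maxSkip + debug.profstackdepth: one slot for the skip sentinel, up to maxSkip physical frames that frame-pointer unwinding may later discard, and the requested logical depth. A quick worked check of that arithmetic; the maxSkip and default-depth values below are assumptions for illustration, not taken from the patch:

package sketch

// fpBufLen mirrors the sizing rule in makeProfStackFP.
func fpBufLen(maxSkip, profStackDepth int) int {
	return 1 + maxSkip + profStackDepth
}

// With an assumed maxSkip of 5 and an assumed default profstackdepth of 128,
// the buffer holds 1 + 5 + 128 = 134 entries.
var _ = fpBufLen(5, 128)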
@@ -2343,11 +2413,6 @@ func oneNewExtraM() {
if raceenabled {
gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
}
- trace := traceAcquire()
- if trace.ok() {
- trace.OneNewExtraM(gp)
- traceRelease(trace)
- }
// put on allg for garbage collector
allgadd(gp)
@@ -2514,6 +2579,16 @@ func cgoBindM() {
}
// A helper function for EnsureDropM.
+//
+// getm should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - fortio.org/log
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname getm
func getm() uintptr {
return uintptr(unsafe.Pointer(getg().m))
}
@@ -3001,6 +3076,16 @@ func handoffp(pp *p) {
// Tries to add one more P to execute G's.
// Called when a G is made runnable (newproc, ready).
// Must be called with a P.
+//
+// wakep should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gvisor.dev/gvisor
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname wakep
func wakep() {
// Be conservative about spinning threads, only start one if none exist
// already.
@@ -3127,7 +3212,7 @@ func execute(gp *g, inheritTime bool) {
// Make sure that gp has had its stack written out to the goroutine
// profile, exactly as it was when the goroutine profiler first stopped
// the world.
- tryRecordGoroutineProfile(gp, osyield)
+ tryRecordGoroutineProfile(gp, nil, osyield)
}
// Assign gp.m before entering _Grunning so running Gs have an
@@ -4130,6 +4215,17 @@ func preemptPark(gp *g) {
// goyield is like Gosched, but it:
// - emits a GoPreempt trace event instead of a GoSched trace event
// - puts the current G on the runq of the current P instead of the globrunq
+//
+// goyield should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gvisor.dev/gvisor
+// - github.com/sagernet/gvisor
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname goyield
func goyield() {
checkTimeouts()
mcall(goyield_m)
@@ -4212,9 +4308,9 @@ func gdestroy(gp *g) {
return
}
- if mp.lockedInt != 0 {
- print("invalid m->lockedInt = ", mp.lockedInt, "\n")
- throw("internal lockOSThread error")
+ if locked && mp.lockedInt != 0 {
+ print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
+ throw("exited a goroutine internally locked to the OS thread")
}
gfput(pp, gp)
if locked {
@@ -4364,6 +4460,14 @@ func reentersyscall(pc, sp, bp uintptr) {
//
// This is exported via linkname to assembly in the syscall package and x/sys.
//
+// Other packages should not be accessing entersyscall directly,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gvisor.dev/gvisor
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:nosplit
//go:linkname entersyscall
func entersyscall() {
@@ -4416,7 +4520,16 @@ func entersyscall_gcwait() {
}
// The same as entersyscall(), but with a hint that the syscall is blocking.
+
+// entersyscallblock should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gvisor.dev/gvisor
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
//
+//go:linkname entersyscallblock
//go:nosplit
func entersyscallblock() {
gp := getg()
@@ -4478,6 +4591,14 @@ func entersyscallblock_handoff() {
//
// This is exported via linkname to assembly in the syscall package.
//
+// exitsyscall should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gvisor.dev/gvisor
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:nosplit
//go:nowritebarrierrec
//go:linkname exitsyscall
@@ -4698,6 +4819,15 @@ func exitsyscall0(gp *g) {
// Called from syscall package before fork.
//
+// syscall_runtime_BeforeFork is for package syscall,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/containerd/containerd
+// - gvisor.dev/gvisor
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
//go:nosplit
func syscall_runtime_BeforeFork() {
@@ -4719,6 +4849,15 @@ func syscall_runtime_BeforeFork() {
// Called from syscall package after fork in parent.
//
+// syscall_runtime_AfterFork is for package syscall,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/containerd/containerd
+// - gvisor.dev/gvisor
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
//go:nosplit
func syscall_runtime_AfterFork() {
@@ -4744,6 +4883,15 @@ var inForkedChild bool
// temporarily sharing address space with the parent process, this must
// not change any global variables or calling into C code that may do so.
//
+// syscall_runtime_AfterForkInChild is for package syscall,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/containerd/containerd
+// - gvisor.dev/gvisor
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild
//go:nosplit
//go:nowritebarrierrec
@@ -5264,7 +5412,7 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
// received from somewhere else (with _LostSIGPROFDuringAtomic64 as pc).
if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
if f := findfunc(pc); f.valid() {
- if hasPrefix(funcname(f), "internal/runtime/atomic") {
+ if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
cpuprof.lostAtomic++
return
}
@@ -5489,7 +5637,6 @@ func (pp *p) destroy() {
freemcache(pp.mcache)
pp.mcache = nil
gfpurge(pp)
- traceProcFree(pp)
if raceenabled {
if pp.timers.raceCtx != 0 {
// The race detector code uses a callback to fetch
@@ -6912,6 +7059,17 @@ func setMaxThreads(in int) (out int) {
return
}
+// procPin should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/gopkg
+// - github.com/choleraehyq/pid
+// - github.com/songzhibin97/gkit
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname procPin
//go:nosplit
func procPin() int {
gp := getg()
@@ -6921,6 +7079,17 @@ func procPin() int {
return int(mp.p.ptr().id)
}
+// procUnpin should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/gopkg
+// - github.com/choleraehyq/pid
+// - github.com/songzhibin97/gkit
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname procUnpin
//go:nosplit
func procUnpin() {
gp := getg()
@@ -6951,20 +7120,18 @@ func sync_atomic_runtime_procUnpin() {
procUnpin()
}
-//go:linkname internal_weak_runtime_procPin internal/weak.runtime_procPin
-//go:nosplit
-func internal_weak_runtime_procPin() int {
- return procPin()
-}
-
-//go:linkname internal_weak_runtime_procUnpin internal/weak.runtime_procUnpin
-//go:nosplit
-func internal_weak_runtime_procUnpin() {
- procUnpin()
-}
-
// Active spinning for sync.Mutex.
//
+// sync_runtime_canSpin should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/livekit/protocol
+// - github.com/sagernet/gvisor
+// - gvisor.dev/gvisor
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname sync_runtime_canSpin sync.runtime_canSpin
//go:nosplit
func sync_runtime_canSpin(i int) bool {
@@ -6982,6 +7149,16 @@ func sync_runtime_canSpin(i int) bool {
return true
}
+// sync_runtime_doSpin should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/livekit/protocol
+// - github.com/sagernet/gvisor
+// - gvisor.dev/gvisor
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname sync_runtime_doSpin sync.runtime_doSpin
//go:nosplit
func sync_runtime_doSpin() {
diff --git a/src/runtime/proflabel.go b/src/runtime/proflabel.go
index b2a161729e..1a5e7e5e2f 100644
--- a/src/runtime/proflabel.go
+++ b/src/runtime/proflabel.go
@@ -8,6 +8,15 @@ import "unsafe"
var labelSync uintptr
+// runtime_setProfLabel should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/cloudwego/localsession
+// - github.com/DataDog/datadog-agent
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname runtime_setProfLabel runtime/pprof.runtime_setProfLabel
func runtime_setProfLabel(labels unsafe.Pointer) {
// Introduce race edge for read-back via profile.
@@ -34,6 +43,15 @@ func runtime_setProfLabel(labels unsafe.Pointer) {
getg().labels = labels
}
+// runtime_getProfLabel should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/cloudwego/localsession
+// - github.com/DataDog/datadog-agent
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname runtime_getProfLabel runtime/pprof.runtime_getProfLabel
func runtime_getProfLabel() unsafe.Pointer {
return getg().labels
diff --git a/src/runtime/race.go b/src/runtime/race.go
index 9acc0c6920..7d5cbce49e 100644
--- a/src/runtime/race.go
+++ b/src/runtime/race.go
@@ -323,6 +323,10 @@ var __tsan_report_count byte
//go:cgo_import_static __tsan_go_atomic64_exchange
//go:cgo_import_static __tsan_go_atomic32_fetch_add
//go:cgo_import_static __tsan_go_atomic64_fetch_add
+//go:cgo_import_static __tsan_go_atomic32_fetch_and
+//go:cgo_import_static __tsan_go_atomic64_fetch_and
+//go:cgo_import_static __tsan_go_atomic32_fetch_or
+//go:cgo_import_static __tsan_go_atomic64_fetch_or
//go:cgo_import_static __tsan_go_atomic32_compare_exchange
//go:cgo_import_static __tsan_go_atomic64_compare_exchange
@@ -642,6 +646,36 @@ func abigen_sync_atomic_AddUint64(addr *uint64, delta uint64) (new uint64)
//go:linkname abigen_sync_atomic_AddUintptr sync/atomic.AddUintptr
func abigen_sync_atomic_AddUintptr(addr *uintptr, delta uintptr) (new uintptr)
+//go:linkname abigen_sync_atomic_AndInt32 sync/atomic.AndInt32
+func abigen_sync_atomic_AndInt32(addr *int32, mask int32) (old int32)
+
+//go:linkname abigen_sync_atomic_AndUint32 sync/atomic.AndUint32
+func abigen_sync_atomic_AndUint32(addr *uint32, mask uint32) (old uint32)
+
+//go:linkname abigen_sync_atomic_AndInt64 sync/atomic.AndInt64
+func abigen_sync_atomic_AndInt64(addr *int64, mask int64) (old int64)
+
+//go:linkname abigen_sync_atomic_AndUint64 sync/atomic.AndUint64
+func abigen_sync_atomic_AndUint64(addr *uint64, mask uint64) (old uint64)
+
+//go:linkname abigen_sync_atomic_AndUintptr sync/atomic.AndUintptr
+func abigen_sync_atomic_AndUintptr(addr *uintptr, mask uintptr) (old uintptr)
+
+//go:linkname abigen_sync_atomic_OrInt32 sync/atomic.OrInt32
+func abigen_sync_atomic_OrInt32(addr *int32, mask int32) (old int32)
+
+//go:linkname abigen_sync_atomic_OrUint32 sync/atomic.OrUint32
+func abigen_sync_atomic_OrUint32(addr *uint32, mask uint32) (old uint32)
+
+//go:linkname abigen_sync_atomic_OrInt64 sync/atomic.OrInt64
+func abigen_sync_atomic_OrInt64(addr *int64, mask int64) (old int64)
+
+//go:linkname abigen_sync_atomic_OrUint64 sync/atomic.OrUint64
+func abigen_sync_atomic_OrUint64(addr *uint64, mask uint64) (old uint64)
+
+//go:linkname abigen_sync_atomic_OrUintptr sync/atomic.OrUintptr
+func abigen_sync_atomic_OrUintptr(addr *uintptr, mask uintptr) (old uintptr)
+
//go:linkname abigen_sync_atomic_CompareAndSwapInt32 sync/atomic.CompareAndSwapInt32
func abigen_sync_atomic_CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)
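The new __tsan fetch_and/fetch_or imports and the abigen linknames above instrument the And and Or operations that sync/atomic gains in this cycle, with the signatures declared in this hunk. A small, hedged usage example of that Go-level API:

package sketch

import "sync/atomic"

func maskFlags() (old32 uint32, old64 int64) {
	var f32 uint32 = 0b1111
	var f64 int64 = -1

	old32 = atomic.AndUint32(&f32, 0b0011) // clear the high bits; returns the old value
	old64 = atomic.OrInt64(&f64, 0x10)     // set a bit; returns the old value
	return
}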
diff --git a/src/runtime/race/testdata/rangefunc_test.go b/src/runtime/race/testdata/rangefunc_test.go
new file mode 100644
index 0000000000..f2ff793df7
--- /dev/null
+++ b/src/runtime/race/testdata/rangefunc_test.go
@@ -0,0 +1,77 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.rangefunc
+
+package race_test
+
+import (
+ "runtime"
+ "sync/atomic"
+ "testing"
+)
+
+type Seq2[T1, T2 any] func(yield func(T1, T2) bool)
+
+// ofSliceIndex returns a Seq over the elements of s. It is equivalent
+// to range s, except that it splits s into two halves and iterates
+// in two separate goroutines. This is racy if yield is racy, and yield
+// will be racy if it contains an early exit.
+func ofSliceIndex[T any, S ~[]T](s S) Seq2[int, T] {
+ return func(yield func(int, T) bool) {
+ c := make(chan bool, 2)
+ var done atomic.Bool
+ go func() {
+ for i := 0; i < len(s)/2; i++ {
+ if !done.Load() && !yield(i, s[i]) {
+ done.Store(true)
+ c <- false
+ }
+ }
+ c <- true
+ }()
+ go func() {
+ for i := len(s) / 2; i < len(s); i++ {
+ if !done.Load() && !yield(i, s[i]) {
+ done.Store(true)
+ c <- false
+ }
+ }
+ c <- true
+ return
+ }()
+ if !<-c {
+ return
+ }
+ <-c
+ }
+}
+
+// foo is racy, or not, depending on the value of v
+// (0-4 == racy; otherwise not racy).
+func foo(v int) int64 {
+ var asum atomic.Int64
+ for i, x := range ofSliceIndex([]int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if i%5 == v {
+ break
+ }
+ asum.Add(x) // don't race on asum
+ runtime.Gosched()
+ }
+ return 100 + asum.Load()
+}
+
+// TestRaceRangeFuncIterator races because i%5 can be equal to 4,
+// so foo can exit early.
+func TestRaceRangeFuncIterator(t *testing.T) {
+ x := foo(4)
+ t.Logf("foo(4)=%d", x)
+}
+
+// TestNoRaceRangeFuncIterator does not race because i%5 is never 5,
+// so foo's loop will not exit early, and thus it will not race.
+func TestNoRaceRangeFuncIterator(t *testing.T) {
+ x := foo(5)
+ t.Logf("foo(5)=%d", x)
+}
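ofSliceIndex above is deliberately racy: both goroutines keep calling yield after an early exit. For contrast, here is a sketch (not part of this change) of a single-goroutine Seq2 that tolerates break, because it stops calling yield as soon as yield returns false; it assumes range-over-func is available, i.e. the goexperiment.rangefunc tag used by this test or a Go release that enables it:

package main

import "fmt"

// ofSlice returns a sequential iterator over s; breaking out of the
// caller's loop makes yield return false and the iterator stops.
func ofSlice[T any](s []T) func(yield func(int, T) bool) {
	return func(yield func(int, T) bool) {
		for i, v := range s {
			if !yield(i, v) {
				return
			}
		}
	}
}

func main() {
	for i, v := range ofSlice([]int64{1, 2, 3, 4, 5}) {
		if i == 2 {
			break // safe: no other goroutine keeps calling yield
		}
		fmt.Println(i, v)
	}
}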
diff --git a/src/runtime/race_amd64.s b/src/runtime/race_amd64.s
index 45c1255509..c4a6d49316 100644
--- a/src/runtime/race_amd64.s
+++ b/src/runtime/race_amd64.s
@@ -303,6 +303,57 @@ TEXT sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
GO_ARGS
JMP sync∕atomic·AddInt64(SB)
+// And
+TEXT sync∕atomic·AndInt32(SB), NOSPLIT|NOFRAME, $0-20
+ GO_ARGS
+ MOVQ $__tsan_go_atomic32_fetch_and(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·AndInt64(SB), NOSPLIT|NOFRAME, $0-24
+ GO_ARGS
+ MOVQ $__tsan_go_atomic64_fetch_and(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·AndUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ JMP sync∕atomic·AndInt32(SB)
+
+TEXT sync∕atomic·AndUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·AndInt64(SB)
+
+TEXT sync∕atomic·AndUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·AndInt64(SB)
+
+// Or
+TEXT sync∕atomic·OrInt32(SB), NOSPLIT|NOFRAME, $0-20
+ GO_ARGS
+ MOVQ $__tsan_go_atomic32_fetch_or(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·OrInt64(SB), NOSPLIT|NOFRAME, $0-24
+ GO_ARGS
+ MOVQ $__tsan_go_atomic64_fetch_or(SB), AX
+ CALL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·OrUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ JMP sync∕atomic·OrInt32(SB)
+
+TEXT sync∕atomic·OrUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·OrInt64(SB)
+
+TEXT sync∕atomic·OrUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·OrInt64(SB)
+
+
// CompareAndSwap
TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT|NOFRAME, $0-17
GO_ARGS
diff --git a/src/runtime/race_arm64.s b/src/runtime/race_arm64.s
index ae0030cf10..c42a6c1377 100644
--- a/src/runtime/race_arm64.s
+++ b/src/runtime/race_arm64.s
@@ -312,6 +312,56 @@ TEXT sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
GO_ARGS
JMP sync∕atomic·AddInt64(SB)
+// And
+TEXT sync∕atomic·AndInt32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_fetch_and(SB), R9
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·AndInt64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_fetch_and(SB), R9
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·AndUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ JMP sync∕atomic·AndInt32(SB)
+
+TEXT sync∕atomic·AndUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·AndInt64(SB)
+
+TEXT sync∕atomic·AndUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·AndInt64(SB)
+
+// Or
+TEXT sync∕atomic·OrInt32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_fetch_or(SB), R9
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·OrInt64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_fetch_or(SB), R9
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·OrUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ JMP sync∕atomic·OrInt32(SB)
+
+TEXT sync∕atomic·OrUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·OrInt64(SB)
+
+TEXT sync∕atomic·OrUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·OrInt64(SB)
+
// CompareAndSwap
TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
GO_ARGS
diff --git a/src/runtime/race_ppc64le.s b/src/runtime/race_ppc64le.s
index 39cfffc39b..43829479bd 100644
--- a/src/runtime/race_ppc64le.s
+++ b/src/runtime/race_ppc64le.s
@@ -325,6 +325,52 @@ TEXT sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
GO_ARGS
BR sync∕atomic·AddInt64(SB)
+// And
+TEXT sync∕atomic·AndInt32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_fetch_and(SB), R8
+ BR racecallatomic<>(SB)
+
+TEXT sync∕atomic·AndInt64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_fetch_and(SB), R8
+ BR racecallatomic<>(SB)
+
+TEXT sync∕atomic·AndUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ BR sync∕atomic·AndInt32(SB)
+
+TEXT sync∕atomic·AndUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ BR sync∕atomic·AndInt64(SB)
+
+TEXT sync∕atomic·AndUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ BR sync∕atomic·AndInt64(SB)
+
+// Or
+TEXT sync∕atomic·OrInt32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_fetch_or(SB), R8
+ BR racecallatomic<>(SB)
+
+TEXT sync∕atomic·OrInt64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_fetch_or(SB), R8
+ BR racecallatomic<>(SB)
+
+TEXT sync∕atomic·OrUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ BR sync∕atomic·OrInt32(SB)
+
+TEXT sync∕atomic·OrUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ BR sync∕atomic·OrInt64(SB)
+
+TEXT sync∕atomic·OrUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ BR sync∕atomic·OrInt64(SB)
+
// CompareAndSwap in tsan
TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
GO_ARGS
diff --git a/src/runtime/race_s390x.s b/src/runtime/race_s390x.s
index dadc12f4db..8e6a5d576a 100644
--- a/src/runtime/race_s390x.s
+++ b/src/runtime/race_s390x.s
@@ -274,6 +274,56 @@ TEXT sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
GO_ARGS
JMP sync∕atomic·AddInt64(SB)
+// And
+TEXT sync∕atomic·AndInt32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_fetch_and(SB), R1
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·AndInt64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_fetch_and(SB), R1
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·AndUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ JMP sync∕atomic·AndInt32(SB)
+
+TEXT sync∕atomic·AndUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·AndInt64(SB)
+
+TEXT sync∕atomic·AndUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·AndInt64(SB)
+
+// Or
+TEXT sync∕atomic·OrInt32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ MOVD $__tsan_go_atomic32_fetch_or(SB), R1
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·OrInt64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ MOVD $__tsan_go_atomic64_fetch_or(SB), R1
+ BL racecallatomic<>(SB)
+ RET
+
+TEXT sync∕atomic·OrUint32(SB), NOSPLIT, $0-20
+ GO_ARGS
+ JMP sync∕atomic·OrInt32(SB)
+
+TEXT sync∕atomic·OrUint64(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·OrInt64(SB)
+
+TEXT sync∕atomic·OrUintptr(SB), NOSPLIT, $0-24
+ GO_ARGS
+ JMP sync∕atomic·OrInt64(SB)
+
// CompareAndSwap
TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
diff --git a/src/runtime/rand.go b/src/runtime/rand.go
index 62577dda91..a66553feeb 100644
--- a/src/runtime/rand.go
+++ b/src/runtime/rand.go
@@ -178,6 +178,15 @@ func randn(n uint32) uint32 {
// the rule is that other packages using runtime-provided
// randomness must always use rand.
//
+// cheaprand should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/gopkg
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname cheaprand
//go:nosplit
func cheaprand() uint32 {
mp := getg().m
@@ -213,6 +222,15 @@ func cheaprand() uint32 {
// the rule is that other packages using runtime-provided
// randomness must always use rand.
//
+// cheaprand64 should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/zhangyunhao116/fastrand
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname cheaprand64
//go:nosplit
func cheaprand64() int64 {
return int64(cheaprand())<<31 ^ int64(cheaprand())
@@ -224,6 +242,15 @@ func cheaprand64() int64 {
// the rule is that other packages using runtime-provided
// randomness must always use randn.
//
+// cheaprandn should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/phuslu/log
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname cheaprandn
//go:nosplit
func cheaprandn(n uint32) uint32 {
// See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
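cheaprandn avoids the modulo by using Lemire's multiply-shift reduction cited above. A standalone sketch of the same reduction, using math/rand/v2 as a stand-in source rather than the runtime's cheaprand:

package main

import (
	"fmt"
	"math/rand/v2"
)

// reduce maps a uniform 32-bit value x into [0, n) via Lemire's
// multiply-shift trick: (x * n) >> 32. It is slightly biased for n
// that are not powers of two, which is acceptable for runtime use.
func reduce(x, n uint32) uint32 {
	return uint32((uint64(x) * uint64(n)) >> 32)
}

func main() {
	for i := 0; i < 5; i++ {
		fmt.Println(reduce(rand.Uint32(), 10)) // values in [0, 10)
	}
}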
diff --git a/src/runtime/rt0_linux_ppc64le.s b/src/runtime/rt0_linux_ppc64le.s
index 417ada21bf..4b7d8e1b94 100644
--- a/src/runtime/rt0_linux_ppc64le.s
+++ b/src/runtime/rt0_linux_ppc64le.s
@@ -78,7 +78,7 @@ TEXT _main<>(SB),NOSPLIT,$-8
// passes argc/argv similar to the linux kernel, R13 (TLS) is
// initialized, and R3/R4 are undefined.
MOVD (R1), R12
- CMP R0, R12
+ CMP R12, $0
BEQ tls_and_argcv_in_reg
// Arguments are passed via the stack (musl loader or a static binary)
@@ -86,7 +86,7 @@ TEXT _main<>(SB),NOSPLIT,$-8
ADD $8, R1, R4 // argv
// Did the TLS pointer get set? If so, don't change it (e.g musl).
- CMP R0, R13
+ CMP R13, $0
BNE tls_and_argcv_in_reg
MOVD $runtime·m0+m_tls(SB), R13 // TLS
diff --git a/src/runtime/runtime.go b/src/runtime/runtime.go
index 6ec0369a7e..e8e614815d 100644
--- a/src/runtime/runtime.go
+++ b/src/runtime/runtime.go
@@ -296,6 +296,30 @@ func setCrashFD(fd uintptr) uintptr {
// It contains an even number of elements, (tag, value) pairs.
var auxv []uintptr
-func getAuxv() []uintptr { return auxv } // accessed from x/sys/cpu; see issue 57336
+// golang.org/x/sys/cpu uses getAuxv via linkname.
+// Do not remove or change the type signature.
+// (See go.dev/issue/57336.)
+//
+// getAuxv should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/cilium/ebpf
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname getAuxv
+func getAuxv() []uintptr { return auxv }
+// zeroVal is used by reflect via linkname.
+//
+// zeroVal should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/ugorji/go/codec
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname zeroVal
var zeroVal [abi.ZeroValSize]byte
diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go
index 5b37d23e90..03ef74b8dc 100644
--- a/src/runtime/runtime1.go
+++ b/src/runtime/runtime1.go
@@ -330,14 +330,23 @@ var debug struct {
tracefpunwindoff int32
traceadvanceperiod int32
traceCheckStackOwnership int32
+ profstackdepth int32
// debug.malloc is used as a combined debug check
// in the malloc function and should be set
// if any of the below debug options is != 0.
- malloc bool
- allocfreetrace int32
- inittrace int32
- sbrk int32
+ malloc bool
+ inittrace int32
+ sbrk int32
+ // traceallocfree controls whether execution traces contain
+ // detailed trace data about memory allocation. This value
+ // affects debug.malloc only if it is != 0 and the execution
+ // tracer is enabled, in which case debug.malloc will be
+ // set to "true" if it isn't already while tracing is enabled.
+ // It will be set while the world is stopped, so it's safe.
+ // The value of traceallocfree can be changed any time in response
+ // to os.Setenv("GODEBUG").
+ traceallocfree atomic.Int32
panicnil atomic.Int32
@@ -354,7 +363,6 @@ var debug struct {
var dbgvars = []*dbgVar{
{name: "adaptivestackstart", value: &debug.adaptivestackstart},
- {name: "allocfreetrace", value: &debug.allocfreetrace},
{name: "asyncpreemptoff", value: &debug.asyncpreemptoff},
{name: "asynctimerchan", atomic: &debug.asynctimerchan},
{name: "cgocheck", value: &debug.cgocheck},
@@ -372,12 +380,14 @@ var dbgvars = []*dbgVar{
{name: "invalidptr", value: &debug.invalidptr},
{name: "madvdontneed", value: &debug.madvdontneed},
{name: "panicnil", atomic: &debug.panicnil},
+ {name: "profstackdepth", value: &debug.profstackdepth, def: 128},
{name: "runtimecontentionstacks", atomic: &debug.runtimeContentionStacks},
{name: "sbrk", value: &debug.sbrk},
{name: "scavtrace", value: &debug.scavtrace},
{name: "scheddetail", value: &debug.scheddetail},
{name: "schedtrace", value: &debug.schedtrace},
{name: "traceadvanceperiod", value: &debug.traceadvanceperiod},
+ {name: "traceallocfree", atomic: &debug.traceallocfree},
{name: "tracecheckstackownership", value: &debug.traceCheckStackOwnership},
{name: "tracebackancestors", value: &debug.tracebackancestors},
{name: "tracefpunwindoff", value: &debug.tracefpunwindoff},
@@ -425,7 +435,8 @@ func parsedebugvars() {
// apply environment settings
parsegodebug(godebug, nil)
- debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0
+ debug.malloc = (debug.inittrace | debug.sbrk) != 0
+ debug.profstackdepth = min(debug.profstackdepth, maxProfStackDepth)
setTraceback(gogetenv("GOTRACEBACK"))
traceback_env = traceback_cache
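The two new knobs differ in how they are applied: profstackdepth is a plain dbgvar read at startup (default 128, clamped to maxProfStackDepth above), while traceallocfree is atomic and may change while the program runs. A hedged sketch of setting them; the names mirror the dbgvars entries above:

package main

import "os"

func main() {
	// profstackdepth is read once at startup, so it is set in the
	// environment before the program runs, e.g.:
	//   GODEBUG=profstackdepth=64 ./myprog
	//
	// traceallocfree is registered as an atomic dbgvar, so a running
	// program may update it via GODEBUG, as the field comment above notes.
	os.Setenv("GODEBUG", "traceallocfree=1")
}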
@@ -605,6 +616,20 @@ func releasem(mp *m) {
}
}
+// reflect_typelinks is meant for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gitee.com/quant1x/gox
+// - github.com/goccy/json
+// - github.com/modern-go/reflect2
+// - github.com/vmware/govmomi
+// - github.com/pinpoint-apm/pinpoint-go-agent
+// - github.com/timandy/routine
+// - github.com/v2pro/plz
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
modules := activeModules()
@@ -619,6 +644,14 @@ func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
// reflect_resolveNameOff resolves a name offset from a base pointer.
//
+// reflect_resolveNameOff is for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/agiledragon/gomonkey/v2
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
@@ -626,6 +659,17 @@ func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointe
// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//
+// reflect_resolveTypeOff is meant for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gitee.com/quant1x/gox
+// - github.com/modern-go/reflect2
+// - github.com/v2pro/plz
+// - github.com/timandy/routine
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
@@ -633,6 +677,15 @@ func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
// reflect_resolveTextOff resolves a function pointer offset from a base type.
//
+// reflect_resolveTextOff is for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/cloudwego/frugal
+// - github.com/agiledragon/gomonkey/v2
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
return toRType((*_type)(rtype)).textOff(textOff(off))
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 0093a6ddb9..4a78963961 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -599,6 +599,7 @@ type m struct {
nextwaitm muintptr // next m waiting for lock
mLockProfile mLockProfile // fields relating to runtime.lock contention
+ profStack []uintptr // used for memory/block/mutex stack traces
// wait* are used to carry arguments from gopark into park_m, because
// there's no stack to put them on. That is their sole purpose.
@@ -767,11 +768,6 @@ type p struct {
// gcStopTime is the nanotime timestamp that this P last entered _Pgcstop.
gcStopTime int64
- // pageTraceBuf is a buffer for writing out page allocation/free/scavenge traces.
- //
- // Used only if GOEXPERIMENT=pagetrace.
- pageTraceBuf pageTraceBuf
-
// Padding is no longer needed. False sharing is now not a worry because p is large enough
// that its size class is an integer multiple of the cache line size (for any of our architectures).
}
@@ -1236,6 +1232,10 @@ var (
timerpMask pMask
)
+// goarmsoftfp is used by runtime/cgo assembly.
+//
+//go:linkname goarmsoftfp
+
var (
// Pool of GC parked background workers. Entries are type
// *gcBgMarkWorkerNode.
@@ -1250,8 +1250,21 @@ var (
// Set on startup in asm_{386,amd64}.s
processorVersionInfo uint32
isIntel bool
+)
- // set by cmd/link on arm systems
+// set by cmd/link on arm systems
+// accessed using linkname by internal/runtime/atomic.
+//
+// goarm should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/creativeprojects/go-selfupdate
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname goarm
+var (
goarm uint8
goarmsoftfp uint8
)
diff --git a/src/runtime/runtime_test.go b/src/runtime/runtime_test.go
index 0839cd96b7..c1bf7f87db 100644
--- a/src/runtime/runtime_test.go
+++ b/src/runtime/runtime_test.go
@@ -10,7 +10,7 @@ import (
"io"
. "runtime"
"runtime/debug"
- "sort"
+ "slices"
"strings"
"sync"
"testing"
@@ -382,9 +382,7 @@ func BenchmarkGoroutineProfile(b *testing.B) {
b.StopTimer()
// Sort latencies then report percentiles.
- sort.Slice(latencies, func(i, j int) bool {
- return latencies[i] < latencies[j]
- })
+ slices.Sort(latencies)
b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
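slices.Sort drops the index-based closure that sort.Slice required and works directly on any slice whose element type is ordered. A small before/after sketch of the change made in the benchmark above:

package main

import (
	"fmt"
	"slices"
	"sort"
)

func main() {
	latencies := []int64{30, 10, 20}

	// Before: a comparison closure over the indices.
	sort.Slice(latencies, func(i, j int) bool { return latencies[i] < latencies[j] })

	// After: the element type is ordered, so no closure is needed.
	latencies = []int64{30, 10, 20}
	slices.Sort(latencies)

	fmt.Println(latencies, "p50 =", latencies[len(latencies)*50/100])
}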
diff --git a/src/runtime/security_unix.go b/src/runtime/security_unix.go
index fa54090df2..6ef3b5b328 100644
--- a/src/runtime/security_unix.go
+++ b/src/runtime/security_unix.go
@@ -6,6 +6,10 @@
package runtime
+import (
+ "internal/stringslite"
+)
+
func secure() {
initSecureMode()
@@ -25,7 +29,7 @@ func secure() {
func secureEnv() {
var hasTraceback bool
for i := 0; i < len(envs); i++ {
- if hasPrefix(envs[i], "GOTRACEBACK=") {
+ if stringslite.HasPrefix(envs[i], "GOTRACEBACK=") {
hasTraceback = true
envs[i] = "GOTRACEBACK=none"
}
diff --git a/src/runtime/sema.go b/src/runtime/sema.go
index f86a19f705..f6b1b84f5f 100644
--- a/src/runtime/sema.go
+++ b/src/runtime/sema.go
@@ -57,6 +57,15 @@ func (t *semTable) rootFor(addr *uint32) *semaRoot {
return &t[(uintptr(unsafe.Pointer(addr))>>3)%semTabSize].root
}
+// sync_runtime_Semacquire should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gvisor.dev/gvisor
+// - github.com/sagernet/gvisor
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname sync_runtime_Semacquire sync.runtime_Semacquire
func sync_runtime_Semacquire(addr *uint32) {
semacquire1(addr, false, semaBlockProfile, 0, waitReasonSemacquire)
@@ -67,6 +76,15 @@ func poll_runtime_Semacquire(addr *uint32) {
semacquire1(addr, false, semaBlockProfile, 0, waitReasonSemacquire)
}
+// sync_runtime_Semrelease should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gvisor.dev/gvisor
+// - github.com/sagernet/gvisor
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname sync_runtime_Semrelease sync.runtime_Semrelease
func sync_runtime_Semrelease(addr *uint32, handoff bool, skipframes int) {
semrelease1(addr, handoff, skipframes)
diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go
index 6ca87561e8..8ba498bdb2 100644
--- a/src/runtime/signal_unix.go
+++ b/src/runtime/signal_unix.go
@@ -752,6 +752,9 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
}
if docrash {
+ var crashSleepMicros uint32 = 5000
+ var watchdogTimeoutMicros uint32 = 2000 * crashSleepMicros
+
isCrashThread := false
if crashing.CompareAndSwap(0, 1) {
isCrashThread = true
@@ -769,19 +772,35 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
// The faulting m is crashing first so it is the faulting thread in the core dump (see issue #63277):
// in expected operation, the first m will wait until the last m has received the SIGQUIT,
// and then run crash/exit and the process is gone.
- // However, if it spends more than 5 seconds to send SIGQUIT to all ms,
- // any of ms may crash/exit the process after waiting for 5 seconds.
+ // However, if it takes more than 10 seconds to send SIGQUIT to all ms,
+ // any of the ms may crash/exit the process after waiting for 10 seconds.
print("\n-----\n\n")
raiseproc(_SIGQUIT)
}
if isCrashThread {
- i := 0
- for (crashing.Load() < mcount()-int32(extraMLength.Load())) && i < 10 {
- i++
- usleep(500 * 1000)
+ // Sleep for short intervals so that we can crash quickly after all ms have received SIGQUIT.
+ // Reset the timer whenever we see more ms have received SIGQUIT,
+ // so that they have enough time to crash (see issue #64752).
+ timeout := watchdogTimeoutMicros
+ maxCrashing := crashing.Load()
+ for timeout > 0 && (crashing.Load() < mcount()-int32(extraMLength.Load())) {
+ usleep(crashSleepMicros)
+ timeout -= crashSleepMicros
+
+ if c := crashing.Load(); c > maxCrashing {
+ // We made progress, so reset the watchdog timeout.
+ maxCrashing = c
+ timeout = watchdogTimeoutMicros
+ }
}
} else {
- usleep(5 * 1000 * 1000)
+ maxCrashing := int32(0)
+ c := crashing.Load()
+ for c > maxCrashing {
+ maxCrashing = c
+ usleep(watchdogTimeoutMicros)
+ c = crashing.Load()
+ }
}
printDebugLog()
crash()
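The rewritten wait loop is a progress-based watchdog: the crashing thread keeps waiting as long as crashing.Load() keeps growing, and gives up only after watchdogTimeoutMicros pass with no progress. A standalone sketch of the same pattern using ordinary time-based sleeps (the counter and durations here are illustrative, not the runtime's):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// waitForProgress polls counter and resets its timeout whenever the
// count grows; it returns true if target was reached before the
// watchdog expired.
func waitForProgress(counter *atomic.Int32, target int32, step, watchdog time.Duration) bool {
	timeout := watchdog
	seen := counter.Load()
	for timeout > 0 && counter.Load() < target {
		time.Sleep(step)
		timeout -= step
		if c := counter.Load(); c > seen {
			seen = c
			timeout = watchdog // progress: reset the watchdog
		}
	}
	return counter.Load() >= target
}

func main() {
	var n atomic.Int32
	go func() {
		for i := 0; i < 5; i++ {
			time.Sleep(10 * time.Millisecond)
			n.Add(1)
		}
	}()
	fmt.Println(waitForProgress(&n, 5, 5*time.Millisecond, 100*time.Millisecond))
}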
@@ -953,10 +972,17 @@ func raisebadsignal(sig uint32, c *sigctxt) {
}
var handler uintptr
+ var flags int32
if sig >= _NSIG {
handler = _SIG_DFL
} else {
handler = atomic.Loaduintptr(&fwdSig[sig])
+ flags = sigtable[sig].flags
+ }
+
+ // If the signal is ignored, raising the signal is a no-op.
+ if handler == _SIG_IGN || (handler == _SIG_DFL && flags&_SigIgn != 0) {
+ return
}
// Reset the signal handler and raise the signal.
diff --git a/src/runtime/sizeclasses.go b/src/runtime/sizeclasses.go
index 9314623453..bbcaa9e983 100644
--- a/src/runtime/sizeclasses.go
+++ b/src/runtime/sizeclasses.go
@@ -82,6 +82,7 @@ package runtime
// 8192 13 32768
const (
+ minHeapAlign = 8
_MaxSmallSize = 32768
smallSizeDiv = 8
smallSizeMax = 1024
diff --git a/src/runtime/slice.go b/src/runtime/slice.go
index 4fbe056b78..78475735af 100644
--- a/src/runtime/slice.go
+++ b/src/runtime/slice.go
@@ -89,6 +89,15 @@ func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsaf
return to
}
+// makeslice should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname makeslice
func makeslice(et *_type, len, cap int) unsafe.Pointer {
mem, overflow := math.MulUintptr(et.Size_, uintptr(cap))
if overflow || mem > maxAlloc || len < 0 || len > cap {
@@ -152,6 +161,19 @@ func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer {
// new length so that the old length is not live (does not need to be
// spilled/restored) and the new length is returned (also does not need
// to be spilled/restored).
+//
+// growslice should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+// - github.com/chenzhuoyu/iasm
+// - github.com/cloudwego/dynamicgo
+// - github.com/ugorji/go/codec
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname growslice
func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice {
oldLen := newLen - num
if raceenabled {
@@ -298,6 +320,14 @@ func nextslicecap(newLen, oldCap int) int {
return newcap
}
+// reflect_growslice should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/cloudwego/dynamicgo
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname reflect_growslice reflect.growslice
func reflect_growslice(et *_type, old slice, num int) slice {
// Semantically equivalent to slices.Grow, except that the caller
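growslice is the runtime half of append; the linkname notes above exist because some serializers call it directly instead of letting the compiler emit the call. Ordinary code only sees its effect through append, as in this sketch (the exact growth factors come from nextslicecap and are an implementation detail, not a guarantee):

package main

import "fmt"

func main() {
	s := make([]int, 0, 1)
	prevCap := cap(s)
	for i := 0; i < 2000; i++ {
		s = append(s, i) // may call runtime.growslice when len == cap
		if cap(s) != prevCap {
			fmt.Printf("len=%d cap %d -> %d\n", len(s), prevCap, cap(s))
			prevCap = cap(s)
		}
	}
}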
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 6679cd993d..cdf859a7ff 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -415,6 +415,13 @@ func stackalloc(n uint32) stack {
v = unsafe.Pointer(s.base())
}
+ if traceAllocFreeEnabled() {
+ trace := traceTryAcquire()
+ if trace.ok() {
+ trace.GoroutineStackAlloc(uintptr(v), uintptr(n))
+ traceRelease(trace)
+ }
+ }
if raceenabled {
racemalloc(v, uintptr(n))
}
@@ -458,6 +465,13 @@ func stackfree(stk stack) {
}
return
}
+ if traceAllocFreeEnabled() {
+ trace := traceTryAcquire()
+ if trace.ok() {
+ trace.GoroutineStackFree(uintptr(v))
+ traceRelease(trace)
+ }
+ }
if msanenabled {
msanfree(v, n)
}
diff --git a/src/runtime/string.go b/src/runtime/string.go
index e01b7fc744..5bdb25b9db 100644
--- a/src/runtime/string.go
+++ b/src/runtime/string.go
@@ -78,6 +78,16 @@ func concatstring5(buf *tmpBuf, a0, a1, a2, a3, a4 string) string {
// n is the length of the slice.
// Buf is a fixed-size buffer for the result,
// it is not nil if the result does not escape.
+//
+// slicebytetostring should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/cloudwego/frugal
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname slicebytetostring
func slicebytetostring(buf *tmpBuf, ptr *byte, n int) string {
if n == 0 {
// Turns out to be a relatively common case.
@@ -312,7 +322,7 @@ func gobytes(p *byte, n int) (b []byte) {
return
}
-// This is exported via linkname to assembly in syscall (for Plan9).
+// This is exported via linkname to assembly in syscall (for Plan9) and cgo.
//
//go:linkname gostring
func gostring(p *byte) string {
@@ -341,14 +351,6 @@ func gostringn(p *byte, l int) string {
return s
}
-func hasPrefix(s, prefix string) bool {
- return len(s) >= len(prefix) && s[:len(prefix)] == prefix
-}
-
-func hasSuffix(s, suffix string) bool {
- return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
-}
-
const (
maxUint64 = ^uint64(0)
maxInt64 = int64(maxUint64 >> 1)
diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go
index 34984d86ff..2aeb4774b9 100644
--- a/src/runtime/stubs.go
+++ b/src/runtime/stubs.go
@@ -11,6 +11,15 @@ import (
// Should be a built-in for unsafe.Pointer?
//
+// add should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - fortio.org/log
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname add
//go:nosplit
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
return unsafe.Pointer(uintptr(p) + x)
@@ -83,6 +92,19 @@ func badsystemstack() {
//
// The (CPU-specific) implementations of this function are in memclr_*.s.
//
+// memclrNoHeapPointers should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+// - github.com/chenzhuoyu/iasm
+// - github.com/cloudwego/frugal
+// - github.com/dgraph-io/ristretto
+// - github.com/outcaste-io/ristretto
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname memclrNoHeapPointers
//go:noescape
func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
@@ -103,12 +125,26 @@ func reflect_memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) {
//
// Implementations are in memmove_*.s.
//
-//go:noescape
-func memmove(to, from unsafe.Pointer, n uintptr)
-
-// Outside assembly calls memmove. Make sure it has ABI wrappers.
+// Outside assembly calls memmove.
+//
+// memmove should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+// - github.com/cloudwego/dynamicgo
+// - github.com/cloudwego/frugal
+// - github.com/ebitengine/purego
+// - github.com/tetratelabs/wazero
+// - github.com/ugorji/go/codec
+// - gvisor.dev/gvisor
+// - github.com/sagernet/gvisor
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
//
//go:linkname memmove
+//go:noescape
+func memmove(to, from unsafe.Pointer, n uintptr)
//go:linkname reflect_memmove reflect.memmove
func reflect_memmove(to, from unsafe.Pointer, n uintptr) {
@@ -120,6 +156,15 @@ const hashLoad = float32(loadFactorNum) / float32(loadFactorDen)
// in internal/bytealg/equal_*.s
//
+// memequal should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname memequal
//go:noescape
func memequal(a, b unsafe.Pointer, size uintptr) bool
@@ -129,6 +174,19 @@ func memequal(a, b unsafe.Pointer, size uintptr) bool
// compiles down to zero instructions.
// USE CAREFULLY!
//
+// noescape should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/gopkg
+// - github.com/ebitengine/purego
+// - github.com/hamba/avro/v2
+// - github.com/puzpuzpuz/xsync/v3
+// - github.com/songzhibin97/gkit
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname noescape
//go:nosplit
func noescape(p unsafe.Pointer) unsafe.Pointer {
x := uintptr(p)
@@ -207,6 +265,17 @@ func breakpoint()
//go:noescape
func reflectcall(stackArgsType *_type, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+// procyield should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/sagernet/sing-tun
+// - github.com/slackhq/nebula
+// - github.com/tailscale/wireguard-go
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname procyield
func procyield(cycles uint32)
type neverCallThisFunction struct{}
@@ -295,7 +364,18 @@ func getclosureptr() uintptr
func asmcgocall(fn, arg unsafe.Pointer) int32
func morestack()
+
+// morestack_noctxt should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/cloudwego/frugal
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname morestack_noctxt
func morestack_noctxt()
+
func rt0_go()
// return0 is a stub used to return 0 from deferproc.
@@ -380,7 +460,19 @@ func abort()
// Called from compiled code; declared for vet; do NOT call from Go.
func gcWriteBarrier1()
+
+// gcWriteBarrier2 should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/bytedance/sonic
+// - github.com/cloudwego/frugal
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname gcWriteBarrier2
func gcWriteBarrier2()
+
func gcWriteBarrier3()
func gcWriteBarrier4()
func gcWriteBarrier5()
diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go
index a7ce9c3a7e..10cdcf9c6e 100644
--- a/src/runtime/symtab.go
+++ b/src/runtime/symtab.go
@@ -196,6 +196,14 @@ func (ci *Frames) Next() (frame Frame, more bool) {
// runtime_FrameStartLine returns the start line of the function in a Frame.
//
+// runtime_FrameStartLine should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/grafana/pyroscope-go/godeltaprof
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname runtime_FrameStartLine runtime/pprof.runtime_FrameStartLine
func runtime_FrameStartLine(f *Frame) int {
return f.startLine
@@ -205,6 +213,14 @@ func runtime_FrameStartLine(f *Frame) int {
// For generic functions this differs from f.Function in that this doesn't replace
// the shape name to "...".
//
+// runtime_FrameSymbolName should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/grafana/pyroscope-go/godeltaprof
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname runtime_FrameSymbolName runtime/pprof.runtime_FrameSymbolName
func runtime_FrameSymbolName(f *Frame) string {
if !f.funcInfo.valid() {
@@ -218,6 +234,15 @@ func runtime_FrameSymbolName(f *Frame) string {
// runtime_expandFinalInlineFrame expands the final pc in stk to include all
// "callers" if pc is inline.
//
+// runtime_expandFinalInlineFrame should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/grafana/pyroscope-go/godeltaprof
+// - github.com/pyroscope-io/godeltaprof
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname runtime_expandFinalInlineFrame runtime/pprof.runtime_expandFinalInlineFrame
func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr {
// TODO: It would be more efficient to report only physical PCs to pprof and
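runtime_expandFinalInlineFrame is a pprof-only helper; ordinary code gets the same inline-aware expansion through the public Frames API. A short sketch of that public path:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	pcs := make([]uintptr, 16)
	n := runtime.Callers(1, pcs) // skip runtime.Callers itself
	frames := runtime.CallersFrames(pcs[:n])
	for {
		frame, more := frames.Next()
		fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
		if !more {
			break
		}
	}
}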
@@ -437,8 +462,19 @@ type modulehash struct {
// To make sure the map isn't collected, we keep a second reference here.
var pinnedTypemaps []map[typeOff]*_type
-var firstmoduledata moduledata // linker symbol
+var firstmoduledata moduledata // linker symbol
+
+// lastmoduledatap should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/cloudwego/frugal
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname lastmoduledatap
var lastmoduledatap *moduledata // linker symbol
+
var modulesSlice *[]*moduledata // see activeModules
// activeModules returns a slice of active modules.
@@ -547,6 +583,15 @@ func moduledataverify() {
const debugPcln = false
+// moduledataverify1 should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/cloudwego/frugal
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname moduledataverify1
func moduledataverify1(datap *moduledata) {
// Check that the pclntab's format is valid.
hdr := datap.pcHeader
@@ -674,6 +719,16 @@ func (md *moduledata) funcName(nameOff int32) string {
// If pc represents multiple functions because of inlining, it returns
// the *Func describing the innermost function, but with an entry of
// the outermost function.
+//
+// For completely unclear reasons, even though they can import runtime,
+// some widely used packages access this using linkname.
+// Notable members of the hall of shame include:
+// - gitee.com/quant1x/gox
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname FuncForPC
func FuncForPC(pc uintptr) *Func {
f := findfunc(pc)
if !f.valid() {
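FuncForPC is already exported, so the linkname above only documents callers that reach it without importing runtime normally. A short example of the ordinary, imported use (for inlined call sites, the Frames API above is the more precise tool):

package main

import (
	"fmt"
	"runtime"
)

func main() {
	pc, file, line, ok := runtime.Caller(0)
	if !ok {
		return
	}
	f := runtime.FuncForPC(pc)
	fmt.Printf("%s (entry %#x) at %s:%d\n", f.Name(), f.Entry(), file, line)
}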
@@ -784,16 +839,37 @@ func (f *_func) isInlined() bool {
}
// entry returns the entry PC for f.
+//
+// entry should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/phuslu/log
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
func (f funcInfo) entry() uintptr {
return f.datap.textAddr(f.entryOff)
}
+//go:linkname badFuncInfoEntry runtime.funcInfo.entry
+func badFuncInfoEntry(funcInfo) uintptr
+
// findfunc looks up function metadata for a PC.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
+// findfunc should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/cloudwego/frugal
+// - github.com/phuslu/log
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:nosplit
+//go:linkname findfunc
func findfunc(pc uintptr) funcInfo {
datap := findmoduledatap(pc)
if datap == nil {
@@ -839,6 +915,13 @@ func (f funcInfo) srcFunc() srcFunc {
return srcFunc{f.datap, f.nameOff, f.startLine, f.funcID}
}
+// name should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/phuslu/log
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
func (s srcFunc) name() string {
if s.datap == nil {
return ""
@@ -846,6 +929,9 @@ func (s srcFunc) name() string {
return s.datap.funcName(s.nameOff)
}
+//go:linkname badSrcFuncName runtime.srcFunc.name
+func badSrcFuncName(srcFunc) string
+
type pcvalueCache struct {
entries [2][8]pcvalueCacheEnt
inUse int
@@ -1035,6 +1121,15 @@ func funcfile(f funcInfo, fileno int32) string {
return "?"
}
+// funcline1 should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/phuslu/log
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname funcline1
func funcline1(f funcInfo, targetpc uintptr, strict bool) (file string, line int32) {
datap := f.datap
if !f.valid() {
@@ -1101,6 +1196,16 @@ func pcdatavalue1(f funcInfo, table uint32, targetpc uintptr, strict bool) int32
}
// Like pcdatavalue, but also return the start PC of this PCData value.
+//
+// pcdatavalue2 should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/cloudwego/frugal
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname pcdatavalue2
func pcdatavalue2(f funcInfo, table uint32, targetpc uintptr) (int32, uintptr) {
if table >= f.npcdata {
return -1, 0
@@ -1129,6 +1234,16 @@ func funcdata(f funcInfo, i uint8) unsafe.Pointer {
}
// step advances to the next pc, value pair in the encoded table.
+//
+// step should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/cloudwego/frugal
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname step
func step(p []byte, pc *uintptr, val *int32, first bool) (newp []byte, ok bool) {
// For both uvdelta and pcdelta, the common case (~70%)
// is that they are a single byte. If so, avoid calling readvarint.
@@ -1174,6 +1289,15 @@ type stackmap struct {
bytedata [1]byte // bitmaps, each starting on a byte boundary
}
+// stackmapdata should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/cloudwego/frugal
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname stackmapdata
//go:nowritebarrier
func stackmapdata(stkmap *stackmap, n int32) bitvector {
// Check this invariant only when stackDebug is on at all.
diff --git a/src/runtime/symtabinl.go b/src/runtime/symtabinl.go
index 9273b49b11..faa01decb9 100644
--- a/src/runtime/symtabinl.go
+++ b/src/runtime/symtabinl.go
@@ -4,7 +4,10 @@
package runtime
-import "internal/abi"
+import (
+ "internal/abi"
+ _ "unsafe" // for linkname
+)
// inlinedCall is the encoding of entries in the FUNCDATA_InlTree table.
type inlinedCall struct {
@@ -51,6 +54,16 @@ type inlineFrame struct {
// This unwinder uses non-strict handling of PC because it's assumed this is
// only ever used for symbolic debugging. If things go really wrong, it'll just
// fall back to the outermost frame.
+//
+// newInlineUnwinder should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/phuslu/log
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname newInlineUnwinder
func newInlineUnwinder(f funcInfo, pc uintptr) (inlineUnwinder, inlineFrame) {
inldata := funcdata(f, abi.FUNCDATA_InlTree)
if inldata == nil {
@@ -90,6 +103,16 @@ func (u *inlineUnwinder) isInlined(uf inlineFrame) bool {
}
// srcFunc returns the srcFunc representing the given frame.
+//
+// srcFunc should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/phuslu/log
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+// The go:linkname is below.
func (u *inlineUnwinder) srcFunc(uf inlineFrame) srcFunc {
if uf.index < 0 {
return u.f.srcFunc()
@@ -103,6 +126,9 @@ func (u *inlineUnwinder) srcFunc(uf inlineFrame) srcFunc {
}
}
+//go:linkname badSrcFunc runtime.(*inlineUnwinder).srcFunc
+func badSrcFunc(*inlineUnwinder, inlineFrame) srcFunc
+
// fileLine returns the file name and line number of the call within the given
// frame. As a convenience, for the innermost frame, it returns the file and
// line of the PC this unwinder was started at (often this is a call to another
diff --git a/src/runtime/symtabinl_test.go b/src/runtime/symtabinl_test.go
index df524aec4a..3c7cb2e595 100644
--- a/src/runtime/symtabinl_test.go
+++ b/src/runtime/symtabinl_test.go
@@ -6,6 +6,7 @@ package runtime
import (
"internal/abi"
+ "internal/stringslite"
"runtime/internal/sys"
)
@@ -50,7 +51,7 @@ func XTestInlineUnwinder(t TestingT) {
for ; uf.valid(); uf = u.next(uf) {
file, line := u.fileLine(uf)
const wantFile = "symtabinl_test.go"
- if !hasSuffix(file, wantFile) {
+ if !stringslite.HasSuffix(file, wantFile) {
t.Errorf("tiuTest+%#x: want file ...%s, got %s", pc-pc1, wantFile, file)
}
@@ -58,10 +59,10 @@ func XTestInlineUnwinder(t TestingT) {
name := sf.name()
const namePrefix = "runtime."
- if hasPrefix(name, namePrefix) {
+ if stringslite.HasPrefix(name, namePrefix) {
name = name[len(namePrefix):]
}
- if !hasPrefix(name, "tiu") {
+ if !stringslite.HasPrefix(name, "tiu") {
t.Errorf("tiuTest+%#x: unexpected function %s", pc-pc1, name)
}
diff --git a/src/runtime/sys_darwin.go b/src/runtime/sys_darwin.go
index 8e728b9d17..1e4b2ac79e 100644
--- a/src/runtime/sys_darwin.go
+++ b/src/runtime/sys_darwin.go
@@ -16,6 +16,10 @@ import (
// and we need to know whether to check 32 or 64 bits of the result.
// (Some libc functions that return 32 bits put junk in the upper 32 bits of AX.)
+// golang.org/x/sys linknames syscall_syscall
+// (in addition to standard package syscall).
+// Do not remove or change the type signature.
+//
//go:linkname syscall_syscall syscall.syscall
//go:nosplit
func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
@@ -38,6 +42,17 @@ func syscall_syscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
}
func syscallX()
+// golang.org/x/sys linknames syscall.syscall6
+// (in addition to standard package syscall).
+// Do not remove or change the type signature.
+//
+// syscall.syscall6 is meant for package syscall (and x/sys),
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/tetratelabs/wazero
+//
+// See go.dev/issue/67401.
+//
//go:linkname syscall_syscall6 syscall.syscall6
//go:nosplit
func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
@@ -49,6 +64,10 @@ func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
}
func syscall6()
+// golang.org/x/sys linknames syscall.syscall9
+// (in addition to standard package syscall).
+// Do not remove or change the type signature.
+//
//go:linkname syscall_syscall9 syscall.syscall9
//go:nosplit
//go:cgo_unsafe_args
@@ -71,6 +90,10 @@ func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
}
func syscall6X()
+// golang.org/x/sys linknames syscall.syscallPtr
+// (in addition to standard package syscall).
+// Do not remove or change the type signature.
+//
//go:linkname syscall_syscallPtr syscall.syscallPtr
//go:nosplit
func syscall_syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
@@ -82,6 +105,10 @@ func syscall_syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
}
func syscallPtr()
+// golang.org/x/sys linknames syscall_rawSyscall
+// (in addition to standard package syscall).
+// Do not remove or change the type signature.
+//
//go:linkname syscall_rawSyscall syscall.rawSyscall
//go:nosplit
func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
@@ -90,6 +117,10 @@ func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
return args.r1, args.r2, args.err
}
+// golang.org/x/sys linknames syscall_rawSyscall6
+// (in addition to standard package syscall).
+// Do not remove or change the type signature.
+//
//go:linkname syscall_rawSyscall6 syscall.rawSyscall6
//go:nosplit
func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
@@ -349,6 +380,15 @@ func nanotime1() int64 {
}
func nanotime_trampoline()
+// walltime should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gitee.com/quant1x/gox
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname walltime
//go:nosplit
//go:cgo_unsafe_args
func walltime() (int64, int32) {
diff --git a/src/runtime/sys_linux_ppc64x.s b/src/runtime/sys_linux_ppc64x.s
index d105585b7e..ba4988b723 100644
--- a/src/runtime/sys_linux_ppc64x.s
+++ b/src/runtime/sys_linux_ppc64x.s
@@ -211,7 +211,7 @@ TEXT runtime·walltime(SB),NOSPLIT,$16-12
MOVD $0, R3 // CLOCK_REALTIME
MOVD runtime·vdsoClockgettimeSym(SB), R12 // Check for VDSO availability
- CMP R12, R0
+ CMP R12, $0
BEQ fallback
// Set vdsoPC and vdsoSP for SIGPROF traceback.
@@ -305,7 +305,7 @@ TEXT runtime·nanotime1(SB),NOSPLIT,$16-8
MOVD g_m(g), R21 // R21 = m
MOVD runtime·vdsoClockgettimeSym(SB), R12 // Check for VDSO availability
- CMP R12, R0
+ CMP R12, $0
BEQ fallback
// Set vdsoPC and vdsoSP for SIGPROF traceback.
diff --git a/src/runtime/sys_openbsd3.go b/src/runtime/sys_openbsd3.go
index 269bf86f10..de09ec5e25 100644
--- a/src/runtime/sys_openbsd3.go
+++ b/src/runtime/sys_openbsd3.go
@@ -17,6 +17,10 @@ import (
// and we need to know whether to check 32 or 64 bits of the result.
// (Some libc functions that return 32 bits put junk in the upper 32 bits of AX.)
+// golang.org/x/sys linknames syscall_syscall
+// (in addition to standard package syscall).
+// Do not remove or change the type signature.
+//
//go:linkname syscall_syscall syscall.syscall
//go:nosplit
//go:cgo_unsafe_args
@@ -39,6 +43,10 @@ func syscall_syscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
}
func syscallX()
+// golang.org/x/sys linknames syscall.syscall6
+// (in addition to standard package syscall).
+// Do not remove or change the type signature.
+//
//go:linkname syscall_syscall6 syscall.syscall6
//go:nosplit
//go:cgo_unsafe_args
@@ -61,6 +69,10 @@ func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
}
func syscall6X()
+// golang.org/x/sys linknames syscall.syscall10
+// (in addition to standard package syscall).
+// Do not remove or change the type signature.
+//
//go:linkname syscall_syscall10 syscall.syscall10
//go:nosplit
//go:cgo_unsafe_args
@@ -83,6 +95,10 @@ func syscall_syscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1
}
func syscall10X()
+// golang.org/x/sys linknames syscall_rawSyscall
+// (in addition to standard package syscall).
+// Do not remove or change the type signature.
+//
//go:linkname syscall_rawSyscall syscall.rawSyscall
//go:nosplit
//go:cgo_unsafe_args
@@ -91,6 +107,10 @@ func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
return
}
+// golang.org/x/sys linknames syscall_rawSyscall6
+// (in addition to standard package syscall).
+// Do not remove or change the type signature.
+//
//go:linkname syscall_rawSyscall6 syscall.rawSyscall6
//go:nosplit
//go:cgo_unsafe_args
diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go
index f0e7661a1b..69d720a395 100644
--- a/src/runtime/syscall_windows.go
+++ b/src/runtime/syscall_windows.go
@@ -423,6 +423,10 @@ func syscall_loadsystemlibrary(filename *uint16) (handle, err uintptr) {
return
}
+// golang.org/x/sys linknames syscall.loadlibrary
+// (in addition to standard package syscall).
+// Do not remove or change the type signature.
+//
//go:linkname syscall_loadlibrary syscall.loadlibrary
func syscall_loadlibrary(filename *uint16) (handle, err uintptr) {
handle, _, err = syscall_SyscallN(uintptr(unsafe.Pointer(_LoadLibraryW)), uintptr(unsafe.Pointer(filename)))
@@ -433,6 +437,10 @@ func syscall_loadlibrary(filename *uint16) (handle, err uintptr) {
return
}
+// golang.org/x/sys linknames syscall.getprocaddress
+// (in addition to standard package syscall).
+// Do not remove or change the type signature.
+//
//go:linkname syscall_getprocaddress syscall.getprocaddress
func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uintptr) {
outhandle, _, err = syscall_SyscallN(uintptr(unsafe.Pointer(_GetProcAddress)), handle, uintptr(unsafe.Pointer(procname)))
diff --git a/src/runtime/testdata/testexithooks/testexithooks.go b/src/runtime/testdata/testexithooks/testexithooks.go
index ceb3326c4f..d734aacb2d 100644
--- a/src/runtime/testdata/testexithooks/testexithooks.go
+++ b/src/runtime/testdata/testexithooks/testexithooks.go
@@ -6,7 +6,9 @@ package main
import (
"flag"
+ "internal/runtime/exithook"
"os"
+ "time"
_ "unsafe"
)
@@ -25,27 +27,26 @@ func main() {
testPanics()
case "callsexit":
testHookCallsExit()
+ case "exit2":
+ testExit2()
default:
panic("unknown mode")
}
}
-//go:linkname runtime_addExitHook runtime.addExitHook
-func runtime_addExitHook(f func(), runOnNonZeroExit bool)
-
func testSimple() {
f1 := func() { println("foo") }
f2 := func() { println("bar") }
- runtime_addExitHook(f1, false)
- runtime_addExitHook(f2, false)
+ exithook.Add(exithook.Hook{F: f1})
+ exithook.Add(exithook.Hook{F: f2})
// no explicit call to os.Exit
}
func testGoodExit() {
f1 := func() { println("apple") }
f2 := func() { println("orange") }
- runtime_addExitHook(f1, false)
- runtime_addExitHook(f2, false)
+ exithook.Add(exithook.Hook{F: f1})
+ exithook.Add(exithook.Hook{F: f2})
// explicit call to os.Exit
os.Exit(0)
}
@@ -56,11 +57,11 @@ func testBadExit() {
f3 := func() { println("blek") }
f4 := func() { println("blub") }
f5 := func() { println("blat") }
- runtime_addExitHook(f1, false)
- runtime_addExitHook(f2, true)
- runtime_addExitHook(f3, false)
- runtime_addExitHook(f4, true)
- runtime_addExitHook(f5, false)
+ exithook.Add(exithook.Hook{F: f1})
+ exithook.Add(exithook.Hook{F: f2, RunOnFailure: true})
+ exithook.Add(exithook.Hook{F: f3})
+ exithook.Add(exithook.Hook{F: f4, RunOnFailure: true})
+ exithook.Add(exithook.Hook{F: f5})
os.Exit(1)
}
@@ -68,9 +69,9 @@ func testPanics() {
f1 := func() { println("ok") }
f2 := func() { panic("BADBADBAD") }
f3 := func() { println("good") }
- runtime_addExitHook(f1, true)
- runtime_addExitHook(f2, true)
- runtime_addExitHook(f3, true)
+ exithook.Add(exithook.Hook{F: f1, RunOnFailure: true})
+ exithook.Add(exithook.Hook{F: f2, RunOnFailure: true})
+ exithook.Add(exithook.Hook{F: f3, RunOnFailure: true})
os.Exit(0)
}
@@ -78,8 +79,17 @@ func testHookCallsExit() {
f1 := func() { println("ok") }
f2 := func() { os.Exit(1) }
f3 := func() { println("good") }
- runtime_addExitHook(f1, true)
- runtime_addExitHook(f2, true)
- runtime_addExitHook(f3, true)
+ exithook.Add(exithook.Hook{F: f1, RunOnFailure: true})
+ exithook.Add(exithook.Hook{F: f2, RunOnFailure: true})
+ exithook.Add(exithook.Hook{F: f3, RunOnFailure: true})
os.Exit(1)
}
+
+func testExit2() {
+ f1 := func() { time.Sleep(100 * time.Millisecond) }
+ exithook.Add(exithook.Hook{F: f1})
+ for range 10 {
+ go os.Exit(0)
+ }
+ os.Exit(0)
+}
diff --git a/src/runtime/testdata/testprog/coro.go b/src/runtime/testdata/testprog/coro.go
new file mode 100644
index 0000000000..032215b801
--- /dev/null
+++ b/src/runtime/testdata/testprog/coro.go
@@ -0,0 +1,185 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.rangefunc
+
+package main
+
+import (
+ "fmt"
+ "iter"
+ "runtime"
+)
+
+func init() {
+ register("CoroLockOSThreadIterLock", func() {
+ println("expect: OK")
+ CoroLockOSThread(callerExhaust, iterLock)
+ })
+ register("CoroLockOSThreadIterLockYield", func() {
+ println("expect: OS thread locking must match")
+ CoroLockOSThread(callerExhaust, iterLockYield)
+ })
+ register("CoroLockOSThreadLock", func() {
+ println("expect: OK")
+ CoroLockOSThread(callerExhaustLocked, iterSimple)
+ })
+ register("CoroLockOSThreadLockIterNested", func() {
+ println("expect: OK")
+ CoroLockOSThread(callerExhaustLocked, iterNested)
+ })
+ register("CoroLockOSThreadLockIterLock", func() {
+ println("expect: OK")
+ CoroLockOSThread(callerExhaustLocked, iterLock)
+ })
+ register("CoroLockOSThreadLockIterLockYield", func() {
+ println("expect: OS thread locking must match")
+ CoroLockOSThread(callerExhaustLocked, iterLockYield)
+ })
+ register("CoroLockOSThreadLockIterYieldNewG", func() {
+ println("expect: OS thread locking must match")
+ CoroLockOSThread(callerExhaustLocked, iterYieldNewG)
+ })
+ register("CoroLockOSThreadLockAfterPull", func() {
+ println("expect: OS thread locking must match")
+ CoroLockOSThread(callerLockAfterPull, iterSimple)
+ })
+ register("CoroLockOSThreadStopLocked", func() {
+ println("expect: OK")
+ CoroLockOSThread(callerStopLocked, iterSimple)
+ })
+ register("CoroLockOSThreadStopLockedIterNested", func() {
+ println("expect: OK")
+ CoroLockOSThread(callerStopLocked, iterNested)
+ })
+}
+
+func CoroLockOSThread(driver func(iter.Seq[int]) error, seq iter.Seq[int]) {
+ if err := driver(seq); err != nil {
+ println("error:", err.Error())
+ return
+ }
+ println("OK")
+}
+
+func callerExhaust(i iter.Seq[int]) error {
+ next, _ := iter.Pull(i)
+ for {
+ v, ok := next()
+ if !ok {
+ break
+ }
+ if v != 5 {
+ return fmt.Errorf("bad iterator: wanted value %d, got %d", 5, v)
+ }
+ }
+ return nil
+}
+
+func callerExhaustLocked(i iter.Seq[int]) error {
+ runtime.LockOSThread()
+ next, _ := iter.Pull(i)
+ for {
+ v, ok := next()
+ if !ok {
+ break
+ }
+ if v != 5 {
+ return fmt.Errorf("bad iterator: wanted value %d, got %d", 5, v)
+ }
+ }
+ runtime.UnlockOSThread()
+ return nil
+}
+
+func callerLockAfterPull(i iter.Seq[int]) error {
+ n := 0
+ next, _ := iter.Pull(i)
+ for {
+ runtime.LockOSThread()
+ n++
+ v, ok := next()
+ if !ok {
+ break
+ }
+ if v != 5 {
+ return fmt.Errorf("bad iterator: wanted value %d, got %d", 5, v)
+ }
+ }
+ for range n {
+ runtime.UnlockOSThread()
+ }
+ return nil
+}
+
+func callerStopLocked(i iter.Seq[int]) error {
+ runtime.LockOSThread()
+ next, stop := iter.Pull(i)
+ v, _ := next()
+ stop()
+ if v != 5 {
+ return fmt.Errorf("bad iterator: wanted value %d, got %d", 5, v)
+ }
+ runtime.UnlockOSThread()
+ return nil
+}
+
+func iterSimple(yield func(int) bool) {
+ for range 3 {
+ if !yield(5) {
+ return
+ }
+ }
+}
+
+func iterNested(yield func(int) bool) {
+ next, stop := iter.Pull(iterSimple)
+ for {
+ v, ok := next()
+ if ok {
+ if !yield(v) {
+ stop()
+ }
+ } else {
+ return
+ }
+ }
+}
+
+func iterLock(yield func(int) bool) {
+ for range 3 {
+ runtime.LockOSThread()
+ runtime.UnlockOSThread()
+
+ if !yield(5) {
+ return
+ }
+ }
+}
+
+func iterLockYield(yield func(int) bool) {
+ for range 3 {
+ runtime.LockOSThread()
+ ok := yield(5)
+ runtime.UnlockOSThread()
+ if !ok {
+ return
+ }
+ }
+}
+
+func iterYieldNewG(yield func(int) bool) {
+ for range 3 {
+ done := make(chan struct{})
+ var ok bool
+ go func() {
+ ok = yield(5)
+ done <- struct{}{}
+ }()
+ <-done
+ if !ok {
+ return
+ }
+ }
+}
diff --git a/src/runtime/testdata/testprog/crash.go b/src/runtime/testdata/testprog/crash.go
index 38c8f6a2fa..bdc395f652 100644
--- a/src/runtime/testdata/testprog/crash.go
+++ b/src/runtime/testdata/testprog/crash.go
@@ -77,7 +77,7 @@ func DoublePanic() {
type exampleError struct{}
func (e exampleError) Error() string {
- panic("important error message")
+ panic("important multi-line\nerror message")
}
func ErrorPanic() {
@@ -97,7 +97,7 @@ func DoubleErrorPanic() {
type exampleStringer struct{}
func (s exampleStringer) String() string {
- panic("important stringer message")
+ panic("important multi-line\nstringer message")
}
func StringerPanic() {
@@ -115,7 +115,7 @@ func DoubleStringerPanic() {
}
func StringPanic() {
- panic("important string message")
+ panic("important multi-line\nstring message")
}
func NilPanic() {
diff --git a/src/runtime/testdata/testprog/panicprint.go b/src/runtime/testdata/testprog/panicprint.go
index c8deabe2ab..4ce958ba3d 100644
--- a/src/runtime/testdata/testprog/panicprint.go
+++ b/src/runtime/testdata/testprog/panicprint.go
@@ -31,7 +31,7 @@ func panicCustomComplex128() {
}
func panicCustomString() {
- panic(MyString("Panic"))
+ panic(MyString("Panic\nline two"))
}
func panicCustomBool() {
diff --git a/src/runtime/testdata/testprogcgo/coro.go b/src/runtime/testdata/testprogcgo/coro.go
new file mode 100644
index 0000000000..e0cb945112
--- /dev/null
+++ b/src/runtime/testdata/testprogcgo/coro.go
@@ -0,0 +1,185 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.rangefunc && !windows
+
+package main
+
+/*
+#include <stdint.h> // for uintptr_t
+
+void go_callback_coro(uintptr_t handle);
+
+static void call_go(uintptr_t handle) {
+ go_callback_coro(handle);
+}
+*/
+import "C"
+
+import (
+ "fmt"
+ "iter"
+ "runtime/cgo"
+)
+
+func init() {
+ register("CoroCgoIterCallback", func() {
+ println("expect: OK")
+ CoroCgo(callerExhaust, iterCallback)
+ })
+ register("CoroCgoIterCallbackYield", func() {
+ println("expect: OS thread locking must match")
+ CoroCgo(callerExhaust, iterCallbackYield)
+ })
+ register("CoroCgoCallback", func() {
+ println("expect: OK")
+ CoroCgo(callerExhaustCallback, iterSimple)
+ })
+ register("CoroCgoCallbackIterNested", func() {
+ println("expect: OK")
+ CoroCgo(callerExhaustCallback, iterNested)
+ })
+ register("CoroCgoCallbackIterCallback", func() {
+ println("expect: OK")
+ CoroCgo(callerExhaustCallback, iterCallback)
+ })
+ register("CoroCgoCallbackIterCallbackYield", func() {
+ println("expect: OS thread locking must match")
+ CoroCgo(callerExhaustCallback, iterCallbackYield)
+ })
+ register("CoroCgoCallbackAfterPull", func() {
+ println("expect: OS thread locking must match")
+ CoroCgo(callerCallbackAfterPull, iterSimple)
+ })
+ register("CoroCgoStopCallback", func() {
+ println("expect: OK")
+ CoroCgo(callerStopCallback, iterSimple)
+ })
+ register("CoroCgoStopCallbackIterNested", func() {
+ println("expect: OK")
+ CoroCgo(callerStopCallback, iterNested)
+ })
+}
+
+var toCall func()
+
+//export go_callback_coro
+func go_callback_coro(handle C.uintptr_t) {
+ h := cgo.Handle(handle)
+ h.Value().(func())()
+ h.Delete()
+}
+
+func callFromC(f func()) {
+ C.call_go(C.uintptr_t(cgo.NewHandle(f)))
+}
+
+func CoroCgo(driver func(iter.Seq[int]) error, seq iter.Seq[int]) {
+ if err := driver(seq); err != nil {
+ println("error:", err.Error())
+ return
+ }
+ println("OK")
+}
+
+func callerExhaust(i iter.Seq[int]) error {
+ next, _ := iter.Pull(i)
+ for {
+ v, ok := next()
+ if !ok {
+ break
+ }
+ if v != 5 {
+ return fmt.Errorf("bad iterator: wanted value %d, got %d", 5, v)
+ }
+ }
+ return nil
+}
+
+func callerExhaustCallback(i iter.Seq[int]) (err error) {
+ callFromC(func() {
+ next, _ := iter.Pull(i)
+ for {
+ v, ok := next()
+ if !ok {
+ break
+ }
+ if v != 5 {
+ err = fmt.Errorf("bad iterator: wanted value %d, got %d", 5, v)
+ }
+ }
+ })
+ return err
+}
+
+func callerStopCallback(i iter.Seq[int]) (err error) {
+ callFromC(func() {
+ next, stop := iter.Pull(i)
+ v, _ := next()
+ stop()
+ if v != 5 {
+ err = fmt.Errorf("bad iterator: wanted value %d, got %d", 5, v)
+ }
+ })
+ return err
+}
+
+func callerCallbackAfterPull(i iter.Seq[int]) (err error) {
+ next, _ := iter.Pull(i)
+ callFromC(func() {
+ for {
+ v, ok := next()
+ if !ok {
+ break
+ }
+ if v != 5 {
+ err = fmt.Errorf("bad iterator: wanted value %d, got %d", 5, v)
+ }
+ }
+ })
+ return err
+}
+
+func iterSimple(yield func(int) bool) {
+ for range 3 {
+ if !yield(5) {
+ return
+ }
+ }
+}
+
+func iterNested(yield func(int) bool) {
+ next, stop := iter.Pull(iterSimple)
+ for {
+ v, ok := next()
+ if ok {
+ if !yield(v) {
+ stop()
+ }
+ } else {
+ return
+ }
+ }
+}
+
+func iterCallback(yield func(int) bool) {
+ for range 3 {
+ callFromC(func() {})
+ if !yield(5) {
+ return
+ }
+ }
+}
+
+func iterCallbackYield(yield func(int) bool) {
+ for range 3 {
+ var ok bool
+ callFromC(func() {
+ ok = yield(5)
+ })
+ if !ok {
+ return
+ }
+ }
+}
diff --git a/src/runtime/testdata/testprogcgo/issue29707.go b/src/runtime/testdata/testprogcgo/issue29707.go
deleted file mode 100644
index 7d9299f131..0000000000
--- a/src/runtime/testdata/testprogcgo/issue29707.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !plan9 && !windows
-// +build !plan9,!windows
-
-// This is for issue #29707
-
-package main
-
-/*
-#include <pthread.h>
-
-extern void* callbackTraceParser(void*);
-typedef void* (*cbTraceParser)(void*);
-
-static void testCallbackTraceParser(cbTraceParser cb) {
- pthread_t thread_id;
- pthread_create(&thread_id, NULL, cb, NULL);
- pthread_join(thread_id, NULL);
-}
-*/
-import "C"
-
-import (
- "bytes"
- "fmt"
- traceparser "internal/trace"
- "runtime/trace"
- "time"
- "unsafe"
-)
-
-func init() {
- register("CgoTraceParser", CgoTraceParser)
-}
-
-//export callbackTraceParser
-func callbackTraceParser(unsafe.Pointer) unsafe.Pointer {
- time.Sleep(time.Millisecond)
- return nil
-}
-
-func CgoTraceParser() {
- buf := new(bytes.Buffer)
-
- trace.Start(buf)
- C.testCallbackTraceParser(C.cbTraceParser(C.callbackTraceParser))
- trace.Stop()
-
- _, err := traceparser.Parse(buf, "")
- if err == traceparser.ErrTimeOrder {
- fmt.Println("ErrTimeOrder")
- } else if err != nil {
- fmt.Println("Parse error: ", err)
- } else {
- fmt.Println("OK")
- }
-}
diff --git a/src/runtime/time_fake.go b/src/runtime/time_fake.go
index 9e24f70931..aad1950c48 100644
--- a/src/runtime/time_fake.go
+++ b/src/runtime/time_fake.go
@@ -31,6 +31,7 @@ var faketimeState struct {
lastfd uintptr
}
+//go:linkname nanotime
//go:nosplit
func nanotime() int64 {
return faketime
diff --git a/src/runtime/time_nofake.go b/src/runtime/time_nofake.go
index 70a2102b22..130ff12816 100644
--- a/src/runtime/time_nofake.go
+++ b/src/runtime/time_nofake.go
@@ -14,11 +14,37 @@ import "unsafe"
// Zero means not to use faketime.
var faketime int64
+// Exported via linkname for use by time and internal/poll.
+//
+// Many external packages also linkname nanotime for a fast monotonic time.
+// Such code should be updated to use:
+//
+// var start = time.Now() // at init time
+//
+// and then replace nanotime() with time.Since(start), which is equally fast.
+//
+// However, all the code that linknames nanotime is never going to go away entirely.
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname nanotime
//go:nosplit
func nanotime() int64 {
return nanotime1()
}
+// overrideWrite allows write to be redirected externally, by
+// linkname'ing this symbol and setting it to a write function.
+//
+// overrideWrite should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - golang.zx2c4.com/wireguard/windows
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname overrideWrite
var overrideWrite func(fd uintptr, p unsafe.Pointer, n int32) int32
// write must be nosplit on Windows (see write1)
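The nanotime comment above recommends time.Since over linknaming; below is a minimal sketch of that replacement, assuming a package-level start time captured at init (the package and function names are illustrative, not part of this change):

package fastclock

import "time"

// start is captured once at init time, as the comment above suggests.
var start = time.Now()

// Nanos returns monotonic nanoseconds since package init. time.Since reads
// the monotonic clock, so this is comparable in cost to linknaming nanotime.
func Nanos() int64 {
	return int64(time.Since(start))
}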
diff --git a/src/runtime/timestub.go b/src/runtime/timestub.go
index 1d2926b43d..da8699b5ee 100644
--- a/src/runtime/timestub.go
+++ b/src/runtime/timestub.go
@@ -11,6 +11,17 @@ package runtime
import _ "unsafe" // for go:linkname
+// time_now should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - gitee.com/quant1x/gox
+// - github.com/phuslu/log
+// - github.com/sethvargo/go-limiter
+// - github.com/ulule/limiter/v3
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
//go:linkname time_now time.now
func time_now() (sec int64, nsec int32, mono int64) {
sec, nsec = walltime()
diff --git a/src/runtime/trace.go b/src/runtime/trace.go
index 6e0808ea76..adf7b0951d 100644
--- a/src/runtime/trace.go
+++ b/src/runtime/trace.go
@@ -67,6 +67,7 @@ var trace struct {
// There are 2 of each: one for gen%2, one for 1-gen%2.
stackTab [2]traceStackTable // maps stack traces to unique ids
stringTab [2]traceStringTable // maps strings to unique ids
+ typeTab [2]traceTypeTable // maps type pointers to unique ids
// cpuLogRead accepts CPU profile samples from the signal handler where
// they're generated. There are two profBufs here: one for gen%2, one for
@@ -110,6 +111,10 @@ var trace struct {
// as a publication barrier.
enabled bool
+ // enabledWithAllocFree is set if debug.traceallocfree != 0 when tracing begins.
+ // It follows the same synchronization protocol as enabled.
+ enabledWithAllocFree bool
+
// Trace generation counter.
gen atomic.Uintptr
lastNonZeroGen uintptr // last non-zero value of gen
@@ -126,6 +131,12 @@ var trace struct {
//
// Mutated only during stop-the-world.
seqGC uint64
+
+ // minPageHeapAddr is the minimum address of the page heap when tracing started.
+ minPageHeapAddr uint64
+
+ // debugMalloc is the value of debug.malloc before tracing began.
+ debugMalloc bool
}
// Trace public API.
@@ -216,6 +227,10 @@ func StartTrace() error {
// Prevent sysmon from running any code that could generate events.
lock(&sched.sysmonlock)
+ // Grab the minimum page heap address. All Ps are stopped, so it's safe to read this since
+ // nothing can allocate heap memory.
+ trace.minPageHeapAddr = uint64(mheap_.pages.inUse.ranges[0].base.addr())
+
// Reset mSyscallID on all Ps while we have them stationary and the trace is disabled.
for _, pp := range allp {
pp.trace.mSyscallID = -1
@@ -236,6 +251,12 @@ func StartTrace() error {
// After trace.gen is updated, other Ms may start creating trace buffers and emitting
// data into them.
trace.enabled = true
+ if debug.traceallocfree.Load() != 0 {
+ // Enable memory events since the GODEBUG is set.
+ trace.debugMalloc = debug.malloc
+ trace.enabledWithAllocFree = true
+ debug.malloc = true
+ }
trace.gen.Store(firstGen)
// Wait for exitingSyscall to drain.
@@ -267,6 +288,11 @@ func StartTrace() error {
tl.GCActive()
}
+ // Dump a snapshot of memory, if enabled.
+ if trace.enabledWithAllocFree {
+ traceSnapshotMemory(firstGen)
+ }
+
// Record the heap goal so we have it at the very beginning of the trace.
tl.HeapGoal()
@@ -297,6 +323,10 @@ func StopTrace() {
// altogether instead of advancing to the next generation.
//
// traceAdvanceSema must not be held.
+//
+// traceAdvance is called by golang.org/x/exp/trace using linkname.
+//
+//go:linkname traceAdvance
func traceAdvance(stopTrace bool) {
semacquire(&traceAdvanceSema)
@@ -556,6 +586,7 @@ func traceAdvance(stopTrace bool) {
// stacks may generate new strings.
traceCPUFlush(gen)
trace.stackTab[gen%2].dump(gen)
+ trace.typeTab[gen%2].dump(gen)
trace.stringTab[gen%2].reset(gen)
// That's it. This generation is done producing buffers.
@@ -585,6 +616,16 @@ func traceAdvance(stopTrace bool) {
// Finish off CPU profile reading.
traceStopReadCPU()
+
+ // Reset debug.malloc if necessary. Note that this is set in a racy
+ // way; that's OK. Some mallocs may still enter into the debug.malloc
+ // block, but they won't generate events because tracing is disabled.
+ // That is, it's OK if mallocs read a stale debug.malloc or
+ // trace.enabledWithAllocFree value.
+ if trace.enabledWithAllocFree {
+ trace.enabledWithAllocFree = false
+ debug.malloc = trace.debugMalloc
+ }
} else {
// Go over each P and emit a status event for it if necessary.
//
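For orientation, a hedged sketch of how the alloc/free experiment above is switched on from user code: debug.traceallocfree corresponds to a GODEBUG setting, so a program traced like the one below and run with GODEBUG=traceallocfree=1 should take the memory snapshot and emit alloc/free events (the file name and workload are illustrative):

package main

import (
	"os"
	"runtime/trace"
)

func main() {
	// Run as: GODEBUG=traceallocfree=1 ./prog
	f, err := os.Create("out.trace")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if err := trace.Start(f); err != nil { // StartTrace above checks debug.traceallocfree
		panic(err)
	}
	defer trace.Stop()

	// Workload whose heap and stack activity shows up as experimental events.
	_ = make([]byte, 1<<20)
}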
diff --git a/src/runtime/trace_cgo_test.go b/src/runtime/trace_cgo_test.go
index 298e139f39..f0db3b7ffb 100644
--- a/src/runtime/trace_cgo_test.go
+++ b/src/runtime/trace_cgo_test.go
@@ -11,7 +11,6 @@ import (
"fmt"
"internal/testenv"
"internal/trace"
- tracev2 "internal/trace/v2"
"io"
"os"
"runtime"
@@ -42,10 +41,6 @@ func TestTraceUnwindCGO(t *testing.T) {
for _, category := range wantLogs {
logs[category] = nil
}
- logsV2 := make(map[string]*tracev2.Event)
- for _, category := range wantLogs {
- logsV2[category] = nil
- }
for _, tracefpunwindoff := range []int{1, 0} {
env := fmt.Sprintf("GODEBUG=tracefpunwindoff=%d", tracefpunwindoff)
got := runBuiltTestProg(t, exe, "Trace", env)
@@ -61,8 +56,8 @@ func TestTraceUnwindCGO(t *testing.T) {
}
for category := range logs {
event := mustFindLogV2(t, bytes.NewReader(traceData), category)
- if wantEvent := logsV2[category]; wantEvent == nil {
- logsV2[category] = &event
+ if wantEvent := logs[category]; wantEvent == nil {
+ logs[category] = &event
} else if got, want := dumpStackV2(&event), dumpStackV2(wantEvent); got != want {
t.Errorf("%q: got stack:\n%s\nwant stack:\n%s\n", category, got, want)
}
@@ -70,53 +65,12 @@ func TestTraceUnwindCGO(t *testing.T) {
}
}
-// mustFindLog returns the EvUserLog event with the given category in events. It
-// fails if no event or multiple events match the category.
-func mustFindLog(t *testing.T, events []*trace.Event, category string) *trace.Event {
- t.Helper()
- var candidates []*trace.Event
- for _, e := range events {
- if e.Type == trace.EvUserLog && len(e.SArgs) >= 1 && e.SArgs[0] == category {
- candidates = append(candidates, e)
- }
- }
- if len(candidates) == 0 {
- t.Errorf("could not find log with category: %q", category)
- } else if len(candidates) > 1 {
- t.Errorf("found more than one log with category: %q", category)
- }
- return candidates[0]
-}
-
-// dumpStack returns e.Stk as a string.
-func dumpStack(e *trace.Event) string {
- var buf bytes.Buffer
- for _, f := range e.Stk {
- file := strings.TrimPrefix(f.File, runtime.GOROOT())
- fmt.Fprintf(&buf, "%s\n\t%s:%d\n", f.Fn, file, f.Line)
- }
- return buf.String()
-}
-
-// parseTrace parses the given trace or skips the test if the trace is broken
-// due to known issues. Partially copied from runtime/trace/trace_test.go.
-func parseTrace(t *testing.T, r io.Reader) []*trace.Event {
- res, err := trace.Parse(r, "")
- if err == trace.ErrTimeOrder {
- t.Skipf("skipping trace: %v", err)
- }
- if err != nil {
- t.Fatalf("failed to parse trace: %v", err)
- }
- return res.Events
-}
-
-func mustFindLogV2(t *testing.T, trace io.Reader, category string) tracev2.Event {
- r, err := tracev2.NewReader(trace)
+func mustFindLogV2(t *testing.T, trc io.Reader, category string) trace.Event {
+ r, err := trace.NewReader(trc)
if err != nil {
t.Fatalf("bad trace: %v", err)
}
- var candidates []tracev2.Event
+ var candidates []trace.Event
for {
ev, err := r.ReadEvent()
if err == io.EOF {
@@ -125,7 +79,7 @@ func mustFindLogV2(t *testing.T, trace io.Reader, category string) tracev2.Event
if err != nil {
t.Fatalf("failed to parse trace: %v", err)
}
- if ev.Kind() == tracev2.EventLog && ev.Log().Category == category {
+ if ev.Kind() == trace.EventLog && ev.Log().Category == category {
candidates = append(candidates, ev)
}
}
@@ -138,9 +92,9 @@ func mustFindLogV2(t *testing.T, trace io.Reader, category string) tracev2.Event
}
// dumpStack returns e.Stack() as a string.
-func dumpStackV2(e *tracev2.Event) string {
+func dumpStackV2(e *trace.Event) string {
var buf bytes.Buffer
- e.Stack().Frames(func(f tracev2.StackFrame) bool {
+ e.Stack().Frames(func(f trace.StackFrame) bool {
file := strings.TrimPrefix(f.File, runtime.GOROOT())
fmt.Fprintf(&buf, "%s\n\t%s:%d\n", f.Func, file, f.Line)
return true
diff --git a/src/runtime/traceallocfree.go b/src/runtime/traceallocfree.go
new file mode 100644
index 0000000000..e6a2a79c69
--- /dev/null
+++ b/src/runtime/traceallocfree.go
@@ -0,0 +1,162 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Runtime -> tracer API for memory events.
+
+package runtime
+
+import (
+ "internal/abi"
+ "runtime/internal/sys"
+)
+
+// Batch type values for the alloc/free experiment.
+const (
+ traceAllocFreeTypesBatch = iota // Contains types. [{id, address, size, ptrspan, name length, name string} ...]
+ traceAllocFreeInfoBatch // Contains info for interpreting events. [min heap addr, page size, min heap align, min stack align]
+)
+
+// traceSnapshotMemory takes a snapshot of all runtime memory for which there are events
+// (heap spans, heap objects, goroutine stacks, etc.) and writes out events for them.
+//
+// The world must be stopped and tracing must be enabled when this function is called.
+func traceSnapshotMemory(gen uintptr) {
+ assertWorldStopped()
+
+ // Write a batch containing information that'll be necessary to
+ // interpret the events.
+ var flushed bool
+ w := unsafeTraceExpWriter(gen, nil, traceExperimentAllocFree)
+ w, flushed = w.ensure(1 + 4*traceBytesPerNumber)
+ if flushed {
+ // Annotate the batch as containing additional info.
+ w.byte(byte(traceAllocFreeInfoBatch))
+ }
+
+ // Emit info.
+ w.varint(uint64(trace.minPageHeapAddr))
+ w.varint(uint64(pageSize))
+ w.varint(uint64(minHeapAlign))
+ w.varint(uint64(fixedStack))
+
+ // Finish writing the batch.
+ w.flush().end()
+
+ // Acquire the trace to write the snapshot events.
+ trace := traceAcquire()
+ if !trace.ok() {
+ throw("traceSnapshotMemory: tracing is not enabled")
+ }
+
+ // Write out all the heap spans and heap objects.
+ for _, s := range mheap_.allspans {
+ if s.state.get() == mSpanDead {
+ continue
+ }
+ // It's some kind of span, so trace that it exists.
+ trace.SpanExists(s)
+
+ // Write out allocated objects if it's a heap span.
+ if s.state.get() != mSpanInUse {
+ continue
+ }
+
+ // Find all allocated objects.
+ abits := s.allocBitsForIndex(0)
+ for i := uintptr(0); i < uintptr(s.nelems); i++ {
+ if abits.index < uintptr(s.freeindex) || abits.isMarked() {
+ x := s.base() + i*s.elemsize
+ trace.HeapObjectExists(x, s.typePointersOfUnchecked(x).typ)
+ }
+ abits.advance()
+ }
+ }
+
+ // Write out all the goroutine stacks.
+ forEachGRace(func(gp *g) {
+ trace.GoroutineStackExists(gp.stack.lo, gp.stack.hi-gp.stack.lo)
+ })
+ traceRelease(trace)
+}
+
+func traceSpanTypeAndClass(s *mspan) traceArg {
+ if s.state.get() == mSpanInUse {
+ return traceArg(s.spanclass) << 1
+ }
+ return traceArg(1)
+}
+
+// SpanExists records an event indicating that the span exists.
+func (tl traceLocker) SpanExists(s *mspan) {
+ tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSpan, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s))
+}
+
+// SpanAlloc records an event indicating that the span has just been allocated.
+func (tl traceLocker) SpanAlloc(s *mspan) {
+ tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSpanAlloc, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s))
+}
+
+// SpanFree records an event indicating that the span is about to be freed.
+func (tl traceLocker) SpanFree(s *mspan) {
+ tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSpanFree, traceSpanID(s))
+}
+
+// traceSpanID creates a trace ID for the span s for the trace.
+func traceSpanID(s *mspan) traceArg {
+ return traceArg(uint64(s.base())-trace.minPageHeapAddr) / pageSize
+}
+
+// HeapObjectExists records that an object already exists at addr with the provided type.
+// The type is optional, and the size of the slot occupied by the object is inferred from the
+// span containing it.
+func (tl traceLocker) HeapObjectExists(addr uintptr, typ *abi.Type) {
+ tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapObject, traceHeapObjectID(addr), tl.rtype(typ))
+}
+
+// HeapObjectAlloc records that an object was newly allocated at addr with the provided type.
+// The type is optional, and the size of the slot occupied by the object is inferred from the
+// span containing it.
+func (tl traceLocker) HeapObjectAlloc(addr uintptr, typ *abi.Type) {
+ tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapObjectAlloc, traceHeapObjectID(addr), tl.rtype(typ))
+}
+
+// HeapObjectFree records that an object at addr is about to be freed.
+func (tl traceLocker) HeapObjectFree(addr uintptr) {
+ tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapObjectFree, traceHeapObjectID(addr))
+}
+
+// traceHeapObjectID creates a trace ID for a heap object at address addr.
+func traceHeapObjectID(addr uintptr) traceArg {
+ return traceArg(uint64(addr)-trace.minPageHeapAddr) / minHeapAlign
+}
+
+// GoroutineStackExists records that a goroutine stack already exists at address base with the provided size.
+func (tl traceLocker) GoroutineStackExists(base, size uintptr) {
+ order := traceCompressStackSize(size)
+ tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoroutineStack, traceGoroutineStackID(base), order)
+}
+
+// GoroutineStackAlloc records that a goroutine stack was newly allocated at address base with the provided size.
+func (tl traceLocker) GoroutineStackAlloc(base, size uintptr) {
+ order := traceCompressStackSize(size)
+ tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoroutineStackAlloc, traceGoroutineStackID(base), order)
+}
+
+// GoroutineStackFree records that a goroutine stack at address base is about to be freed.
+func (tl traceLocker) GoroutineStackFree(base uintptr) {
+ tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoroutineStackFree, traceGoroutineStackID(base))
+}
+
+// traceGoroutineStackID creates a trace ID for the goroutine stack from its base address.
+func traceGoroutineStackID(base uintptr) traceArg {
+ return traceArg(uint64(base)-trace.minPageHeapAddr) / fixedStack
+}
+
+// traceCompressStackSize assumes size is a power of 2 and returns log2(size).
+func traceCompressStackSize(size uintptr) traceArg {
+ if size&(size-1) != 0 {
+ throw("goroutine stack size is not a power of 2")
+ }
+ return traceArg(sys.Len64(uint64(size)))
+}
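The ID helpers above are deliberately reversible, as the traceexp.go event comments further below also note. A sketch of the inverse mappings a trace consumer could apply, assuming example values for the constants the runtime writes into the info batch (min heap address, page size, min heap align, fixed stack size):

package tracedecode

// Example values only; the real ones come from the traceAllocFreeInfoBatch
// emitted by traceSnapshotMemory.
const (
	minPageHeapAddr = 0xc000000000
	pageSize        = 8192
	minHeapAlign    = 8
	fixedStack      = 2048
)

// Inverses of traceSpanID, traceHeapObjectID and traceGoroutineStackID.
func spanBase(id uint64) uint64    { return minPageHeapAddr + id*pageSize }
func heapObjAddr(id uint64) uint64 { return minPageHeapAddr + id*minHeapAlign }
func stackBase(id uint64) uint64   { return minPageHeapAddr + id*fixedStack }

// Inverse of traceCompressStackSize: sys.Len64 of a power of two 2^k is k+1.
func stackSize(order uint64) uint64 { return uint64(1) << (order - 1) }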
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go
index 100662f274..03c02f7771 100644
--- a/src/runtime/traceback.go
+++ b/src/runtime/traceback.go
@@ -8,6 +8,7 @@ import (
"internal/abi"
"internal/bytealg"
"internal/goarch"
+ "internal/stringslite"
"runtime/internal/sys"
"unsafe"
)
@@ -1078,6 +1079,16 @@ func printAncestorTracebackFuncInfo(f funcInfo, pc uintptr) {
print("\n")
}
+// callers should be an internal detail,
+// (and is almost identical to Callers),
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/phuslu/log
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname callers
func callers(skip int, pcbuf []uintptr) int {
sp := getcallersp()
pc := getcallerpc()
@@ -1131,7 +1142,7 @@ func showfuncinfo(sf srcFunc, firstFrame bool, calleeID abi.FuncID) bool {
return true
}
- return bytealg.IndexByteString(name, '.') >= 0 && (!hasPrefix(name, "runtime.") || isExportedRuntime(name))
+ return bytealg.IndexByteString(name, '.') >= 0 && (!stringslite.HasPrefix(name, "runtime.") || isExportedRuntime(name))
}
// isExportedRuntime reports whether name is an exported runtime function.
@@ -1342,7 +1353,7 @@ func isSystemGoroutine(gp *g, fixed bool) bool {
}
return fingStatus.Load()&fingRunningFinalizer == 0
}
- return hasPrefix(funcname(f), "runtime.")
+ return stringslite.HasPrefix(funcname(f), "runtime.")
}
// SetCgoTraceback records three C functions to use to gather
diff --git a/src/runtime/traceback_system_test.go b/src/runtime/traceback_system_test.go
index 5131e44e64..ece58e806d 100644
--- a/src/runtime/traceback_system_test.go
+++ b/src/runtime/traceback_system_test.go
@@ -28,7 +28,7 @@ func crash() {
// Ensure that we get pc=0x%x values in the traceback.
debug.SetTraceback("system")
writeSentinel(os.Stdout)
- debug.SetCrashOutput(os.Stdout)
+ debug.SetCrashOutput(os.Stdout, debug.CrashOptions{})
go func() {
// This call is typically inlined.
diff --git a/src/runtime/tracebuf.go b/src/runtime/tracebuf.go
index 711a2c1f02..db4adf53e9 100644
--- a/src/runtime/tracebuf.go
+++ b/src/runtime/tracebuf.go
@@ -59,7 +59,7 @@ func (w traceWriter) end() {
func (w traceWriter) ensure(maxSize int) (traceWriter, bool) {
refill := w.traceBuf == nil || !w.available(maxSize)
if refill {
- w = w.refill()
+ w = w.refill(traceNoExperiment)
}
return w, refill
}
@@ -78,7 +78,9 @@ func (w traceWriter) flush() traceWriter {
}
// refill puts w.traceBuf on the queue of full buffers and refresh's w's buffer.
-func (w traceWriter) refill() traceWriter {
+//
+// exp indicates whether the refilled batch should be an experimental batch (traceEvExperimentalBatch) and, if so, which experiment it belongs to.
+func (w traceWriter) refill(exp traceExperiment) traceWriter {
systemstack(func() {
lock(&trace.lock)
if w.traceBuf != nil {
@@ -112,7 +114,12 @@ func (w traceWriter) refill() traceWriter {
}
// Write the buffer's header.
- w.byte(byte(traceEvEventBatch))
+ if exp == traceNoExperiment {
+ w.byte(byte(traceEvEventBatch))
+ } else {
+ w.byte(byte(traceEvExperimentalBatch))
+ w.byte(byte(exp))
+ }
w.varint(uint64(w.gen))
w.varint(uint64(mID))
w.varint(uint64(ts))
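Combined with the traceEvExperimentalBatch layout documented in traceevent.go below, the header that refill now writes is: event byte, optional experiment byte, then generation, M ID, and timestamp as varints. A hedged consumer-side sketch with illustrative event-byte values (the real traceEv* constants are runtime-internal, and only the fields written by refill are decoded; the batch length noted in the event comment follows separately):

package tracebatch

import (
	"bufio"
	"encoding/binary"
)

// Illustrative values; not the runtime's actual event bytes.
const (
	evEventBatch        = 1
	evExperimentalBatch = 100
)

// batchHeader mirrors what refill writes for one batch.
type batchHeader struct {
	Experiment byte // meaningful only for experimental batches
	Gen, M, Ts uint64
}

func readBatchHeader(r *bufio.Reader) (h batchHeader, err error) {
	ev, err := r.ReadByte()
	if err != nil {
		return h, err
	}
	if ev == evExperimentalBatch {
		if h.Experiment, err = r.ReadByte(); err != nil {
			return h, err
		}
	}
	if h.Gen, err = binary.ReadUvarint(r); err != nil {
		return h, err
	}
	if h.M, err = binary.ReadUvarint(r); err != nil {
		return h, err
	}
	h.Ts, err = binary.ReadUvarint(r)
	return h, err
}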
diff --git a/src/runtime/traceevent.go b/src/runtime/traceevent.go
index bdb3f3e445..2a869fb515 100644
--- a/src/runtime/traceevent.go
+++ b/src/runtime/traceevent.go
@@ -7,6 +7,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
)
@@ -87,6 +88,9 @@ const (
// GoStatus with stack.
traceEvGoStatusStack // goroutine status at the start of a generation, with a stack [timestamp, goroutine ID, M ID, status, stack ID]
+
+ // Batch event for an experimental batch with a custom format.
+ traceEvExperimentalBatch // start of extra data [experiment ID, generation, M ID, timestamp, batch length, batch data...]
)
// traceArg is a simple wrapper type to help ensure that arguments passed
@@ -198,3 +202,8 @@ func (tl traceLocker) string(s string) traceArg {
func (tl traceLocker) uniqueString(s string) traceArg {
return traceArg(trace.stringTab[tl.gen%2].emit(tl.gen, s))
}
+
+// rtype returns a traceArg representing typ which may be passed to write.
+func (tl traceLocker) rtype(typ *abi.Type) traceArg {
+ return traceArg(trace.typeTab[tl.gen%2].put(typ))
+}
diff --git a/src/runtime/traceexp.go b/src/runtime/traceexp.go
new file mode 100644
index 0000000000..9fc85df5a8
--- /dev/null
+++ b/src/runtime/traceexp.go
@@ -0,0 +1,68 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// traceExpWriter is a wrapper around traceWriter that produces traceEvExperimentalBatch
+// batches. This means that the data written to the writer need not conform to the standard
+// trace format.
+type traceExpWriter struct {
+ traceWriter
+ exp traceExperiment
+}
+
+// unsafeTraceExpWriter produces a traceExpWriter that doesn't lock the trace.
+//
+// It should only be used in contexts where either:
+// - Another traceLocker is held.
+// - trace.gen is prevented from advancing.
+//
+// buf may be nil.
+func unsafeTraceExpWriter(gen uintptr, buf *traceBuf, exp traceExperiment) traceExpWriter {
+ return traceExpWriter{traceWriter{traceLocker: traceLocker{gen: gen}, traceBuf: buf}, exp}
+}
+
+// ensure makes sure that at least maxSize bytes are available to write.
+//
+// Returns whether the buffer was flushed.
+func (w traceExpWriter) ensure(maxSize int) (traceExpWriter, bool) {
+ refill := w.traceBuf == nil || !w.available(maxSize)
+ if refill {
+ w.traceWriter = w.traceWriter.refill(w.exp)
+ }
+ return w, refill
+}
+
+// traceExperiment is an enumeration of the different kinds of experiments supported for tracing.
+type traceExperiment uint8
+
+const (
+ // traceNoExperiment indicates no experiment.
+ traceNoExperiment traceExperiment = iota
+
+ // traceExperimentAllocFree is an experiment to add alloc/free events to the trace.
+ traceExperimentAllocFree
+)
+
+// Experimental events.
+const (
+ _ traceEv = 127 + iota
+
+ // Experimental events for ExperimentAllocFree.
+
+ // Experimental heap span events. IDs map reversibly to base addresses.
+ traceEvSpan // heap span exists [timestamp, id, npages, type/class]
+ traceEvSpanAlloc // heap span alloc [timestamp, id, npages, type/class]
+ traceEvSpanFree // heap span free [timestamp, id]
+
+ // Experimental heap object events. IDs map reversibly to addresses.
+ traceEvHeapObject // heap object exists [timestamp, id, type]
+ traceEvHeapObjectAlloc // heap object alloc [timestamp, id, type]
+ traceEvHeapObjectFree // heap object free [timestamp, id]
+
+ // Experimental goroutine stack events. IDs map reversibly to addresses.
+ traceEvGoroutineStack // stack exists [timestamp, id, order]
+ traceEvGoroutineStackAlloc // stack alloc [timestamp, id, order]
+ traceEvGoroutineStackFree // stack free [timestamp, id]
+)
diff --git a/src/runtime/traceregion.go b/src/runtime/traceregion.go
index fdc6fbdb32..43eef9c92b 100644
--- a/src/runtime/traceregion.go
+++ b/src/runtime/traceregion.go
@@ -104,7 +104,9 @@ func (a *traceRegionAlloc) drop() {
a.full = block.next
sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys)
}
- sysFree(a.current.Load(), unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys)
- a.current.Store(nil)
+ if current := a.current.Load(); current != nil {
+ sysFree(current, unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys)
+ a.current.Store(nil)
+ }
a.dropping.Store(false)
}
diff --git a/src/runtime/traceruntime.go b/src/runtime/traceruntime.go
index 3e0e3b3a76..195b3e1c37 100644
--- a/src/runtime/traceruntime.go
+++ b/src/runtime/traceruntime.go
@@ -59,6 +59,8 @@ func traceLockInit() {
lockInit(&trace.stringTab[1].tab.mem.lock, lockRankTraceStrings)
lockInit(&trace.stackTab[0].tab.mem.lock, lockRankTraceStackTab)
lockInit(&trace.stackTab[1].tab.mem.lock, lockRankTraceStackTab)
+ lockInit(&trace.typeTab[0].tab.mem.lock, lockRankTraceTypeTab)
+ lockInit(&trace.typeTab[1].tab.mem.lock, lockRankTraceTypeTab)
lockInit(&trace.lock, lockRankTrace)
}
@@ -142,6 +144,14 @@ func traceEnabled() bool {
return trace.enabled
}
+// traceAllocFreeEnabled returns true if the trace is currently enabled
+// and alloc/free events are also enabled.
+//
+//go:nosplit
+func traceAllocFreeEnabled() bool {
+ return trace.enabledWithAllocFree
+}
+
// traceShuttingDown returns true if the trace is currently shutting down.
func traceShuttingDown() bool {
return trace.shutdown.Load()
@@ -174,6 +184,22 @@ func traceAcquire() traceLocker {
return traceAcquireEnabled()
}
+// traceTryAcquire is like traceAcquire, but may return an invalid traceLocker even
+// if tracing is enabled. For example, it will return !ok if traceAcquire is being
+// called with an active traceAcquire on the M (reentrant locking). This exists for
+// optimistically emitting events in the few contexts where tracing is now allowed.
+//
+// nosplit for alignment with traceAcquire, so it can be used in the
+// same contexts.
+//
+//go:nosplit
+func traceTryAcquire() traceLocker {
+ if !traceEnabled() {
+ return traceLocker{}
+ }
+ return traceTryAcquireEnabled()
+}
+
// traceAcquireEnabled is the traceEnabled path for traceAcquire. It's explicitly
// broken out to make traceAcquire inlineable to keep the overhead of the tracer
// when it's disabled low.
@@ -218,6 +244,26 @@ func traceAcquireEnabled() traceLocker {
return traceLocker{mp, gen}
}
+// traceTryAcquireEnabled is like traceAcquireEnabled but may return an invalid
+// traceLocker under some conditions. See traceTryAcquire for more details.
+//
+// nosplit for alignment with traceAcquireEnabled, so it can be used in the
+// same contexts.
+//
+//go:nosplit
+func traceTryAcquireEnabled() traceLocker {
+ // Any time we acquire a traceLocker, we may flush a trace buffer. But
+ // buffer flushes are rare. Record the lock edge even if it doesn't happen
+ // this time.
+ lockRankMayTraceFlush()
+
+ // Check if we're already locked. If so, return an invalid traceLocker.
+ if getg().m.trace.seqlock.Load()%2 == 1 {
+ return traceLocker{}
+ }
+ return traceAcquireEnabled()
+}
+
// ok returns true if the traceLocker is valid (i.e. tracing is enabled).
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
@@ -560,11 +606,6 @@ func (tl traceLocker) HeapGoal() {
tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapGoal, traceArg(heapGoal))
}
-// OneNewExtraM is a no-op in the new tracer. This is worth keeping around though because
-// it's a good place to insert a thread-level event about the new extra M.
-func (tl traceLocker) OneNewExtraM(_ *g) {
-}
-
// GoCreateSyscall indicates that a goroutine has transitioned from dead to GoSyscall.
//
// Unlike GoCreate, the caller must be running on gp.
@@ -657,14 +698,6 @@ func trace_userLog(id uint64, category, message string) {
traceRelease(tl)
}
-// traceProcFree is called when a P is destroyed.
-//
-// This must run on the system stack to match the old tracer.
-//
-//go:systemstack
-func traceProcFree(_ *p) {
-}
-
// traceThreadDestroy is called when a thread is removed from
// sched.freem.
//
@@ -703,10 +736,3 @@ func traceThreadDestroy(mp *m) {
throw("bad use of trace.seqlock")
}
}
-
-// Not used in the new tracer; solely for compatibility with the old tracer.
-// nosplit because it's called from exitsyscall without a P.
-//
-//go:nosplit
-func (_ traceLocker) RecordSyscallExitedTime(_ *g, _ *p) {
-}
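Based on the comments above, a sketch of the intended runtime-internal call pattern for traceTryAcquire: emit the event only when the optimistic acquire succeeds, and otherwise skip it silently. This is not a standalone program, and the event chosen is just an example of something a reentrant call site might emit:

// Runtime-internal sketch, not an exported API.
if tl := traceTryAcquire(); tl.ok() {
	tl.HeapObjectFree(addr) // any event emission could go here
	traceRelease(tl)
}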
diff --git a/src/runtime/tracestack.go b/src/runtime/tracestack.go
index 04b935a2c9..225566d102 100644
--- a/src/runtime/tracestack.go
+++ b/src/runtime/tracestack.go
@@ -147,20 +147,22 @@ func (t *traceStackTable) put(pcs []uintptr) uint64 {
// releases all memory and resets state. It must only be called once the caller
// can guarantee that there are no more writers to the table.
func (t *traceStackTable) dump(gen uintptr) {
+ stackBuf := make([]uintptr, traceStackSize)
w := unsafeTraceWriter(gen, nil)
if root := (*traceMapNode)(t.tab.root.Load()); root != nil {
- w = dumpStacksRec(root, w)
+ w = dumpStacksRec(root, w, stackBuf)
}
w.flush().end()
t.tab.reset()
}
-func dumpStacksRec(node *traceMapNode, w traceWriter) traceWriter {
+func dumpStacksRec(node *traceMapNode, w traceWriter, stackBuf []uintptr) traceWriter {
stack := unsafe.Slice((*uintptr)(unsafe.Pointer(&node.data[0])), uintptr(len(node.data))/unsafe.Sizeof(uintptr(0)))
// N.B. This might allocate, but that's OK because we're not writing to the M's buffer,
// but one we're about to create (with ensure).
- frames := makeTraceFrames(w.gen, fpunwindExpand(stack))
+ n := fpunwindExpand(stackBuf, stack)
+ frames := makeTraceFrames(w.gen, stackBuf[:n])
// The maximum number of bytes required to hold the encoded stack, given that
// it contains N frames.
@@ -194,7 +196,7 @@ func dumpStacksRec(node *traceMapNode, w traceWriter) traceWriter {
if child == nil {
continue
}
- w = dumpStacksRec((*traceMapNode)(child), w)
+ w = dumpStacksRec((*traceMapNode)(child), w, stackBuf)
}
return w
}
@@ -260,31 +262,43 @@ func fpTracebackPCs(fp unsafe.Pointer, pcBuf []uintptr) (i int) {
return i
}
+//go:linkname pprof_fpunwindExpand
+func pprof_fpunwindExpand(dst, src []uintptr) int {
+ return fpunwindExpand(dst, src)
+}
+
+// fpunwindExpand expands a call stack from pcBuf into dst,
+// returning the number of PCs written to dst.
+// pcBuf and dst should not overlap.
+//
// fpunwindExpand checks if pcBuf contains logical frames (which include inlined
// frames) or physical frames (produced by frame pointer unwinding) using a
// sentinel value in pcBuf[0]. Logical frames are simply returned without the
// sentinel. Physical frames are turned into logical frames via inline unwinding
// and by applying the skip value that's stored in pcBuf[0].
-func fpunwindExpand(pcBuf []uintptr) []uintptr {
- if len(pcBuf) > 0 && pcBuf[0] == logicalStackSentinel {
+func fpunwindExpand(dst, pcBuf []uintptr) int {
+ if len(pcBuf) == 0 {
+ return 0
+ } else if len(pcBuf) > 0 && pcBuf[0] == logicalStackSentinel {
// pcBuf contains logical rather than inlined frames, skip has already been
// applied, just return it without the sentinel value in pcBuf[0].
- return pcBuf[1:]
+ return copy(dst, pcBuf[1:])
}
var (
+ n int
lastFuncID = abi.FuncIDNormal
- newPCBuf = make([]uintptr, 0, traceStackSize)
skip = pcBuf[0]
// skipOrAdd skips or appends retPC to newPCBuf and returns true if more
// pcs can be added.
skipOrAdd = func(retPC uintptr) bool {
if skip > 0 {
skip--
- } else {
- newPCBuf = append(newPCBuf, retPC)
+ } else if n < len(dst) {
+ dst[n] = retPC
+ n++
}
- return len(newPCBuf) < cap(newPCBuf)
+ return n < len(dst)
}
)
@@ -312,7 +326,7 @@ outer:
lastFuncID = sf.funcID
}
}
- return newPCBuf
+ return n
}
// startPCForTrace returns the start PC of a goroutine for tracing purposes.
diff --git a/src/runtime/tracetime.go b/src/runtime/tracetime.go
index baef630ab5..571012413f 100644
--- a/src/runtime/tracetime.go
+++ b/src/runtime/tracetime.go
@@ -6,7 +6,10 @@
package runtime
-import "internal/goarch"
+import (
+ "internal/goarch"
+ _ "unsafe"
+)
// Timestamps in trace are produced through either nanotime or cputicks
// and divided by traceTimeDiv. nanotime is used everywhere except on
@@ -46,6 +49,9 @@ type traceTime uint64
//
// nosplit because it's called from exitsyscall, which is nosplit.
//
+// traceClockNow is called by golang.org/x/exp/trace using linkname.
+//
+//go:linkname traceClockNow
//go:nosplit
func traceClockNow() traceTime {
if osHasLowResClock {
diff --git a/src/runtime/tracetype.go b/src/runtime/tracetype.go
new file mode 100644
index 0000000000..b27a690916
--- /dev/null
+++ b/src/runtime/tracetype.go
@@ -0,0 +1,82 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Trace type table.
+
+package runtime
+
+import (
+ "internal/abi"
+ "internal/goarch"
+ "unsafe"
+)
+
+// traceTypeTable maps type pointers to unique ids.
+// It is lock-free for reading.
+type traceTypeTable struct {
+ tab traceMap
+}
+
+// put returns a unique id for the type typ, caching it in the table
+// if this is the first time it has been seen.
+//
+// N.B. typ must be kept alive forever for this to work correctly.
+func (t *traceTypeTable) put(typ *abi.Type) uint64 {
+ if typ == nil {
+ return 0
+ }
+ // Insert the pointer to the type itself.
+ id, _ := t.tab.put(noescape(unsafe.Pointer(&typ)), goarch.PtrSize)
+ return id
+}
+
+// dump writes all previously cached types to trace buffers and
+// releases all memory and resets state. It must only be called once the caller
+// can guarantee that there are no more writers to the table.
+func (t *traceTypeTable) dump(gen uintptr) {
+ w := unsafeTraceExpWriter(gen, nil, traceExperimentAllocFree)
+ if root := (*traceMapNode)(t.tab.root.Load()); root != nil {
+ w = dumpTypesRec(root, w)
+ }
+ w.flush().end()
+ t.tab.reset()
+}
+
+func dumpTypesRec(node *traceMapNode, w traceExpWriter) traceExpWriter {
+ typ := (*abi.Type)(*(*unsafe.Pointer)(unsafe.Pointer(&node.data[0])))
+ typName := toRType(typ).string()
+
+ // The maximum number of bytes required to hold the encoded type.
+ maxBytes := 1 + 5*traceBytesPerNumber + len(typName)
+
+ // Estimate the size of this record. This
+ // bound is pretty loose, but avoids counting
+ // lots of varint sizes.
+ //
+ // Add 1 because we might also write a traceAllocFreeTypesBatch byte.
+ var flushed bool
+ w, flushed = w.ensure(1 + maxBytes)
+ if flushed {
+ // Annotate the batch as containing types.
+ w.byte(byte(traceAllocFreeTypesBatch))
+ }
+
+ // Emit type.
+ w.varint(uint64(node.id))
+ w.varint(uint64(uintptr(unsafe.Pointer(typ))))
+ w.varint(uint64(typ.Size()))
+ w.varint(uint64(typ.PtrBytes))
+ w.varint(uint64(len(typName)))
+ w.stringData(typName)
+
+ // Recursively walk all child nodes.
+ for i := range node.children {
+ child := node.children[i].Load()
+ if child == nil {
+ continue
+ }
+ w = dumpTypesRec((*traceMapNode)(child), w)
+ }
+ return w
+}
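Each record that dumpTypesRec emits follows the traceAllocFreeTypesBatch layout declared in traceallocfree.go: id, address, size, pointer bytes, name length, name bytes. A hedged consumer-side sketch for decoding one such record (type and function names are illustrative):

package tracetypes

import (
	"bufio"
	"encoding/binary"
	"io"
)

// typeRec mirrors one type record as written by dumpTypesRec.
type typeRec struct {
	ID, Addr, Size, PtrBytes uint64
	Name                     string
}

func readTypeRec(r *bufio.Reader) (rec typeRec, err error) {
	if rec.ID, err = binary.ReadUvarint(r); err != nil {
		return rec, err
	}
	if rec.Addr, err = binary.ReadUvarint(r); err != nil {
		return rec, err
	}
	if rec.Size, err = binary.ReadUvarint(r); err != nil {
		return rec, err
	}
	if rec.PtrBytes, err = binary.ReadUvarint(r); err != nil {
		return rec, err
	}
	n, err := binary.ReadUvarint(r)
	if err != nil {
		return rec, err
	}
	name := make([]byte, n)
	if _, err = io.ReadFull(r, name); err != nil {
		return rec, err
	}
	rec.Name = string(name)
	return rec, nil
}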
diff --git a/src/runtime/type.go b/src/runtime/type.go
index a2975c4a99..201340752b 100644
--- a/src/runtime/type.go
+++ b/src/runtime/type.go
@@ -106,6 +106,15 @@ func reflectOffsUnlock() {
unlock(&reflectOffs.lock)
}
+// resolveNameOff should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/cloudwego/frugal
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname resolveNameOff
func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name {
if off == 0 {
return name{}
@@ -140,6 +149,15 @@ func (t rtype) nameOff(off nameOff) name {
return resolveNameOff(unsafe.Pointer(t.Type), off)
}
+// resolveTypeOff should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+// - github.com/cloudwego/frugal
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname resolveTypeOff
func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type {
if off == 0 || off == -1 {
// -1 is the sentinel value for unreachable code.
diff --git a/src/runtime/vdso_linux_amd64.go b/src/runtime/vdso_linux_amd64.go
index 4e9f748f4a..9c56409137 100644
--- a/src/runtime/vdso_linux_amd64.go
+++ b/src/runtime/vdso_linux_amd64.go
@@ -4,6 +4,8 @@
package runtime
+import _ "unsafe" // for linkname
+
const (
// vdsoArrayMax is the byte-size of a maximally sized array on this architecture.
// See cmd/compile/internal/amd64/galign.go arch.MAXWIDTH initialization.
@@ -21,3 +23,6 @@ var (
vdsoGettimeofdaySym uintptr
vdsoClockgettimeSym uintptr
)
+
+// vdsoGettimeofdaySym is accessed from the syscall package.
+//go:linkname vdsoGettimeofdaySym