diff options
author | Austin Clements <austin@google.com> | 2021-07-30 16:40:17 -0400 |
---|---|---|
committer | Austin Clements <austin@google.com> | 2021-07-30 21:51:49 +0000 |
commit | e3e9f0bb2d6cc15b201fe2e0a0ac095d62cf4b8c (patch) | |
tree | 1c5960b23d5810b75637928c33a6bd4c6357e8c1 /src/runtime | |
parent | 40e561d9337afbae221b34d6d0811761f32412f6 (diff) | |
download | go-e3e9f0bb2d6cc15b201fe2e0a0ac095d62cf4b8c.tar.gz go-e3e9f0bb2d6cc15b201fe2e0a0ac095d62cf4b8c.zip |
[dev.typeparams] Revert "[dev.typeparams] runtime,cmd/compile,cmd/link: replace jmpdefer with a loop"
This reverts CL 337652.
I'm reverting CL 337651 and this builds on top of it.
Change-Id: I03ce363be44c2a3defff2e43e7b1aad83386820d
Reviewed-on: https://go-review.googlesource.com/c/go/+/338709
Trust: Austin Clements <austin@google.com>
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Diffstat (limited to 'src/runtime')
-rw-r--r-- | src/runtime/asm_386.s | 20 | ||||
-rw-r--r-- | src/runtime/asm_amd64.s | 15 | ||||
-rw-r--r-- | src/runtime/asm_arm.s | 14 | ||||
-rw-r--r-- | src/runtime/asm_arm64.s | 17 | ||||
-rw-r--r-- | src/runtime/asm_mips64x.s | 16 | ||||
-rw-r--r-- | src/runtime/asm_mipsx.s | 16 | ||||
-rw-r--r-- | src/runtime/asm_ppc64x.s | 28 | ||||
-rw-r--r-- | src/runtime/asm_riscv64.s | 15 | ||||
-rw-r--r-- | src/runtime/asm_s390x.s | 15 | ||||
-rw-r--r-- | src/runtime/asm_wasm.s | 29 | ||||
-rw-r--r-- | src/runtime/panic.go | 58 | ||||
-rw-r--r-- | src/runtime/stubs.go | 2 | ||||
-rw-r--r-- | src/runtime/symtab.go | 1 |
13 files changed, 221 insertions, 25 deletions
diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s index 11c60309f4..dd2ea458cc 100644 --- a/src/runtime/asm_386.s +++ b/src/runtime/asm_386.s @@ -582,6 +582,26 @@ TEXT ·publicationBarrier(SB),NOSPLIT,$0-0 // compile barrier. RET +// void jmpdefer(fn, sp); +// called from deferreturn. +// 1. pop the caller +// 2. sub 5 bytes (the length of CALL & a 32 bit displacement) from the callers +// return (when building for shared libraries, subtract 16 bytes -- 5 bytes +// for CALL & displacement to call __x86.get_pc_thunk.cx, 6 bytes for the +// LEAL to load the offset into BX, and finally 5 for the call & displacement) +// 3. jmp to the argument +TEXT runtime·jmpdefer(SB), NOSPLIT, $0-8 + MOVL fv+0(FP), DX // fn + MOVL argp+4(FP), BX // caller sp + LEAL -4(BX), SP // caller sp after CALL +#ifdef GOBUILDMODE_shared + SUBL $16, (SP) // return to CALL again +#else + SUBL $5, (SP) // return to CALL again +#endif + MOVL 0(DX), BX + JMP BX // but first run the deferred function + // Save state of caller into g->sched, // but using fake PC from systemstack_switch. // Must only be called from functions with no locals ($0) diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s index 2d8f4c2412..0f719b2664 100644 --- a/src/runtime/asm_amd64.s +++ b/src/runtime/asm_amd64.s @@ -662,6 +662,21 @@ TEXT ·publicationBarrier(SB),NOSPLIT,$0-0 // compile barrier. RET +// func jmpdefer(fv func(), argp uintptr) +// argp is a caller SP. +// called from deferreturn. +// 1. pop the caller +// 2. sub 5 bytes from the callers return +// 3. 
jmp to the argument +TEXT runtime·jmpdefer(SB), NOSPLIT, $0-16 + MOVQ fv+0(FP), DX // fn + MOVQ argp+8(FP), BX // caller sp + LEAQ -8(BX), SP // caller sp after CALL + MOVQ -8(SP), BP // restore BP as if deferreturn returned (harmless if framepointers not in use) + SUBQ $5, (SP) // return to CALL again + MOVQ 0(DX), BX + JMP BX // but first run the deferred function + // Save state of caller into g->sched, // but using fake PC from systemstack_switch. // Must only be called from functions with no locals ($0) diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s index a1164781d2..5c2bc00fe8 100644 --- a/src/runtime/asm_arm.s +++ b/src/runtime/asm_arm.s @@ -506,6 +506,20 @@ CALLFN(·call268435456, 268435456) CALLFN(·call536870912, 536870912) CALLFN(·call1073741824, 1073741824) +// void jmpdefer(fn, sp); +// called from deferreturn. +// 1. grab stored LR for caller +// 2. sub 4 bytes to get back to BL deferreturn +// 3. B to fn +TEXT runtime·jmpdefer(SB),NOSPLIT,$0-8 + MOVW 0(R13), LR + MOVW $-4(LR), LR // BL deferreturn + MOVW fv+0(FP), R7 + MOVW argp+4(FP), R13 + MOVW $-4(R13), R13 // SP is 4 below argp, due to saved LR + MOVW 0(R7), R1 + B (R1) + // Save state of caller into g->sched, // but using fake PC from systemstack_switch. // Must only be called from functions with no locals ($0) diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s index e51ce2f831..e7c5fa3225 100644 --- a/src/runtime/asm_arm64.s +++ b/src/runtime/asm_arm64.s @@ -982,6 +982,23 @@ again: CBNZ R0, again RET +// void jmpdefer(fv, sp); +// called from deferreturn. +// 1. grab stored LR for caller +// 2. sub 4 bytes to get back to BL deferreturn +// 3. BR to fn +TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16 + MOVD 0(RSP), R0 + SUB $4, R0 + MOVD R0, LR + + MOVD fv+0(FP), R26 + MOVD argp+8(FP), R0 + MOVD R0, RSP + SUB $8, RSP + MOVD 0(R26), R3 + B (R3) + // Save state of caller into g->sched, // but using fake PC from systemstack_switch. 
// Must only be called from functions with no locals ($0) diff --git a/src/runtime/asm_mips64x.s b/src/runtime/asm_mips64x.s index b2e2384c36..f3ac453d99 100644 --- a/src/runtime/asm_mips64x.s +++ b/src/runtime/asm_mips64x.s @@ -384,6 +384,22 @@ CALLFN(·call1073741824, 1073741824) TEXT runtime·procyield(SB),NOSPLIT,$0-0 RET +// void jmpdefer(fv, sp); +// called from deferreturn. +// 1. grab stored LR for caller +// 2. sub 8 bytes to get back to JAL deferreturn +// 3. JMP to fn +TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16 + MOVV 0(R29), R31 + ADDV $-8, R31 + + MOVV fv+0(FP), REGCTXT + MOVV argp+8(FP), R29 + ADDV $-8, R29 + NOR R0, R0 // prevent scheduling + MOVV 0(REGCTXT), R4 + JMP (R4) + // Save state of caller into g->sched, // but using fake PC from systemstack_switch. // Must only be called from functions with no locals ($0) diff --git a/src/runtime/asm_mipsx.s b/src/runtime/asm_mipsx.s index 87a1344e8f..4dc165849e 100644 --- a/src/runtime/asm_mipsx.s +++ b/src/runtime/asm_mipsx.s @@ -382,6 +382,22 @@ CALLFN(·call1073741824, 1073741824) TEXT runtime·procyield(SB),NOSPLIT,$0-4 RET +// void jmpdefer(fv, sp); +// called from deferreturn. +// 1. grab stored LR for caller +// 2. sub 8 bytes to get back to JAL deferreturn +// 3. JMP to fn +TEXT runtime·jmpdefer(SB),NOSPLIT,$0-8 + MOVW 0(R29), R31 + ADDU $-8, R31 + + MOVW fv+0(FP), REGCTXT + MOVW argp+4(FP), R29 + ADDU $-4, R29 + NOR R0, R0 // prevent scheduling + MOVW 0(REGCTXT), R4 + JMP (R4) + // Save state of caller into g->sched, // but using fake PC from systemstack_switch. // Must only be called from functions with no locals ($0) diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s index 5dc96c5947..a789d041e4 100644 --- a/src/runtime/asm_ppc64x.s +++ b/src/runtime/asm_ppc64x.s @@ -503,6 +503,34 @@ again: OR R6, R6, R6 // Set PPR priority back to medium-low RET +// void jmpdefer(fv, sp); +// called from deferreturn. +// 1. grab stored LR for caller +// 2. 
sub 8 bytes to get back to either nop or toc reload before deferreturn +// 3. BR to fn +// When dynamically linking Go, it is not sufficient to rewind to the BL +// deferreturn -- we might be jumping between modules and so we need to reset +// the TOC pointer in r2. To do this, codegen inserts MOVD 24(R1), R2 *before* +// the BL deferreturn and jmpdefer rewinds to that. +TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16 + MOVD 0(R1), R31 + SUB $8, R31 + MOVD R31, LR + + MOVD fv+0(FP), R11 + MOVD argp+8(FP), R1 + SUB $FIXED_FRAME, R1 +#ifdef GOOS_aix + // AIX won't trigger a SIGSEGV if R11 = nil + // So it manually triggers it + CMP R0, R11 + BNE 2(PC) + MOVD R0, 0(R0) +#endif + MOVD 0(R11), R12 + MOVD R12, CTR + BR (CTR) + // Save state of caller into g->sched, // but using fake PC from systemstack_switch. // Must only be called from functions with no locals ($0) diff --git a/src/runtime/asm_riscv64.s b/src/runtime/asm_riscv64.s index 9927a817f7..9957ae201b 100644 --- a/src/runtime/asm_riscv64.s +++ b/src/runtime/asm_riscv64.s @@ -248,6 +248,21 @@ TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0 MOV gobuf_pc(T0), T0 JALR ZERO, T0 +// func jmpdefer(fv func(), argp uintptr) +// called from deferreturn +// 1. grab stored return address from the caller's frame +// 2. sub 8 bytes to get back to JAL deferreturn +// 3. JMP to fn +TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16 + MOV 0(X2), RA + ADD $-8, RA + + MOV fv+0(FP), CTXT + MOV argp+8(FP), X2 + ADD $-8, X2 + MOV 0(CTXT), T0 + JALR ZERO, T0 + // func procyield(cycles uint32) TEXT runtime·procyield(SB),NOSPLIT,$0-0 RET diff --git a/src/runtime/asm_s390x.s b/src/runtime/asm_s390x.s index d4110d563f..534cb6112c 100644 --- a/src/runtime/asm_s390x.s +++ b/src/runtime/asm_s390x.s @@ -480,6 +480,21 @@ TEXT callfnMVC<>(SB),NOSPLIT|NOFRAME,$0-0 TEXT runtime·procyield(SB),NOSPLIT,$0-0 RET +// void jmpdefer(fv, sp); +// called from deferreturn. +// 1. grab stored LR for caller +// 2. 
sub 6 bytes to get back to BL deferreturn (size of BRASL instruction) +// 3. BR to fn +TEXT runtime·jmpdefer(SB),NOSPLIT|NOFRAME,$0-16 + MOVD 0(R15), R1 + SUB $6, R1, LR + + MOVD fv+0(FP), R12 + MOVD argp+8(FP), R15 + SUB $8, R15 + MOVD 0(R12), R3 + BR (R3) + // Save state of caller into g->sched, // but using fake PC from systemstack_switch. // Must only be called from functions with no locals ($0) diff --git a/src/runtime/asm_wasm.s b/src/runtime/asm_wasm.s index d885da6e70..53c271aa70 100644 --- a/src/runtime/asm_wasm.s +++ b/src/runtime/asm_wasm.s @@ -193,6 +193,35 @@ TEXT runtime·return0(SB), NOSPLIT, $0-0 MOVD $0, RET0 RET +TEXT runtime·jmpdefer(SB), NOSPLIT, $0-16 + MOVD fv+0(FP), CTXT + + Get CTXT + I64Eqz + If + CALLNORESUME runtime·sigpanic<ABIInternal>(SB) + End + + // caller sp after CALL + I64Load argp+8(FP) + I64Const $8 + I64Sub + I32WrapI64 + Set SP + + // decrease PC_B by 1 to CALL again + Get SP + I32Load16U (SP) + I32Const $1 + I32Sub + I32Store16 $0 + + // but first run the deferred function + Get CTXT + I32WrapI64 + I64Load $0 + JMP + TEXT runtime·asminit(SB), NOSPLIT, $0-0 // No per-thread init. RET diff --git a/src/runtime/panic.go b/src/runtime/panic.go index 4b8bca6c56..e66fe27be0 100644 --- a/src/runtime/panic.go +++ b/src/runtime/panic.go @@ -396,39 +396,47 @@ func freedeferfn() { throw("freedefer with d.fn != nil") } -// deferreturn runs deferred functions for the caller's frame. +// Run a deferred function if there is one. // The compiler inserts a call to this at the end of any // function which calls defer. +// If there is a deferred function, this will call runtime·jmpdefer, +// which will jump to the deferred function such that it appears +// to have been called by the caller of deferreturn at the point +// just before deferreturn was called. The effect is that deferreturn +// is called again and again until there are no more deferred functions. 
func deferreturn() { gp := getg() - for { - d := gp._defer - if d == nil { - return - } - sp := getcallersp() - if d.sp != sp { - return - } - if d.openDefer { - done := runOpenDeferFrame(gp, d) - if !done { - throw("unfinished open-coded defers in deferreturn") - } - gp._defer = d.link - freedefer(d) - // If this frame uses open defers, then this - // must be the only defer record for the - // frame, so we can just return. - return + d := gp._defer + if d == nil { + return + } + sp := getcallersp() + if d.sp != sp { + return + } + if d.openDefer { + done := runOpenDeferFrame(gp, d) + if !done { + throw("unfinished open-coded defers in deferreturn") } - - fn := d.fn - d.fn = nil gp._defer = d.link freedefer(d) - fn() + return } + + fn := d.fn + d.fn = nil + gp._defer = d.link + freedefer(d) + // If the defer function pointer is nil, force the seg fault to happen + // here rather than in jmpdefer. gentraceback() throws an error if it is + // called with a callback on an LR architecture and jmpdefer is on the + // stack, because jmpdefer manipulates SP (see issue #8153). + _ = **(**funcval)(unsafe.Pointer(&fn)) + // We must not split the stack between computing argp and + // calling jmpdefer because argp is a uintptr stack pointer. + argp := getcallersp() + sys.MinFrameSize + jmpdefer(fn, argp) } // Goexit terminates the goroutine that calls it. No other goroutine is affected. 
diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go index fc29a1bac3..b94acdea1f 100644 --- a/src/runtime/stubs.go +++ b/src/runtime/stubs.go @@ -176,6 +176,8 @@ func cgocallback(fn, frame, ctxt uintptr) func gogo(buf *gobuf) +//go:noescape +func jmpdefer(fv func(), argp uintptr) func asminit() func setg(gg *g) func breakpoint() diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go index d08aa0b320..44ea0710c6 100644 --- a/src/runtime/symtab.go +++ b/src/runtime/symtab.go @@ -331,6 +331,7 @@ const ( funcID_gogo funcID_gopanic funcID_handleAsyncEvent + funcID_jmpdefer funcID_mcall funcID_morestack funcID_mstart |