author    Cherry Zhang <cherryyz@google.com>  2020-08-04 20:25:10 -0400
committer Cherry Zhang <cherryyz@google.com>  2020-08-10 18:06:04 +0000
commit    a93a4c178025b52be85c9eb4b9f2815716a62d11 (patch)
tree      85499b8d785eea7bd3ec0752da80d285fd9d52f3 /src/runtime/sys_linux_arm.s
parent    ba9e10889976025ee1d027db6b1cad383ec56de8 (diff)
runtime: make nanotime1 reentrant
Currently, nanotime1 (and walltime1) is not reentrant: it sets m.vdsoSP at entry and clears it at exit. If a signal lands in between and nanotime1 is called from the signal handler, it will clear m.vdsoSP while we are still in nanotime1. If (in the unlikely event) it is signaled again, m.vdsoSP will be wrong, which may cause the stack unwinding code to crash. This CL makes it reentrant by saving and restoring the previous vdsoPC and vdsoSP instead of setting them to 0 at exit.

TODO: have some way to test?

Change-Id: I9ee53b251f1d8a5a489c71d4b4c0df1dee70c3e5
Reviewed-on: https://go-review.googlesource.com/c/go/+/246763
Run-TryBot: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
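The assembly change below follows a simple save/restore discipline. As a rough Go-level sketch of that pattern (the struct and function here are illustrative stand-ins, not the actual runtime code), assuming pc and sp stand in for the LR and R13 values the assembly records:

    package vdsosketch

    // mstub mirrors the two runtime.m fields involved; it is a sketch,
    // not the real runtime structure.
    type mstub struct {
    	vdsoPC uintptr // PC to report in a SIGPROF traceback while in vdso code
    	vdsoSP uintptr // SP to report; 0 means "not currently in vdso code"
    }

    // vdsoCall shows the reentrant save/restore discipline the CL adopts.
    func vdsoCall(mp *mstub, pc, sp uintptr, call func()) {
    	// Save the previous values instead of assuming they are zero, so a
    	// nested call from a signal handler does not clobber the outer ones.
    	oldPC, oldSP := mp.vdsoPC, mp.vdsoSP
    	mp.vdsoPC, mp.vdsoSP = pc, sp

    	call() // the actual vdso clock_gettime invocation

    	// Restore rather than zero. Outside a signal handler the saved values
    	// are 0 anyway; inside one, they still describe the interrupted frame.
    	mp.vdsoSP = oldSP
    	mp.vdsoPC = oldPC
    }

In the assembly, the "stack" for the saved pair is the 8-byte frame added to each function ($8-12 and $8-8 below), which is why the frame sizes change along with the prologue and epilogue.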
Diffstat (limited to 'src/runtime/sys_linux_arm.s')
-rw-r--r--  src/runtime/sys_linux_arm.s  36
1 file changed, 32 insertions(+), 4 deletions(-)
diff --git a/src/runtime/sys_linux_arm.s b/src/runtime/sys_linux_arm.s
index e103da56dc..475f52344c 100644
--- a/src/runtime/sys_linux_arm.s
+++ b/src/runtime/sys_linux_arm.s
@@ -242,7 +242,7 @@ TEXT runtime·mincore(SB),NOSPLIT,$0
MOVW R0, ret+12(FP)
RET
-TEXT runtime·walltime1(SB),NOSPLIT,$0-12
+TEXT runtime·walltime1(SB),NOSPLIT,$8-12
// We don't know how much stack space the VDSO code will need,
// so switch to g0.
@@ -252,6 +252,13 @@ TEXT runtime·walltime1(SB),NOSPLIT,$0-12
MOVW g_m(g), R5 // R5 is unchanged by C code.
// Set vdsoPC and vdsoSP for SIGPROF traceback.
+ // Save the old values on stack and restore them on exit,
+ // so this function is reentrant.
+ MOVW m_vdsoPC(R5), R1
+ MOVW m_vdsoSP(R5), R2
+ MOVW R1, 4(R13)
+ MOVW R2, 8(R13)
+
MOVW LR, m_vdsoPC(R5)
MOVW R13, m_vdsoSP(R5)
@@ -312,8 +319,15 @@ finish:
MOVW 12(R13), R2 // nsec
MOVW R4, R13 // Restore real SP
- MOVW $0, R1
+ // Restore vdsoPC, vdsoSP
+ // We don't worry about being signaled between the two stores.
+ // If we are not in a signal handler, we'll restore vdsoSP to 0,
+ // and no one will care about vdsoPC. If we are in a signal handler,
+ // we cannot receive another signal.
+ MOVW 8(R13), R1
MOVW R1, m_vdsoSP(R5)
+ MOVW 4(R13), R1
+ MOVW R1, m_vdsoPC(R5)
MOVW R0, sec_lo+0(FP)
MOVW R1, sec_hi+4(FP)
@@ -321,7 +335,7 @@ finish:
RET
// int64 nanotime1(void)
-TEXT runtime·nanotime1(SB),NOSPLIT,$0-8
+TEXT runtime·nanotime1(SB),NOSPLIT,$8-8
// Switch to g0 stack. See comment above in runtime·walltime.
// Save old SP. Use R13 instead of SP to avoid linker rewriting the offsets.
@@ -330,6 +344,13 @@ TEXT runtime·nanotime1(SB),NOSPLIT,$0-8
MOVW g_m(g), R5 // R5 is unchanged by C code.
// Set vdsoPC and vdsoSP for SIGPROF traceback.
+ // Save the old values on stack and restore them on exit,
+ // so this function is reentrant.
+ MOVW m_vdsoPC(R5), R1
+ MOVW m_vdsoSP(R5), R2
+ MOVW R1, 4(R13)
+ MOVW R2, 8(R13)
+
MOVW LR, m_vdsoPC(R5)
MOVW R13, m_vdsoSP(R5)
@@ -390,8 +411,15 @@ finish:
MOVW 12(R13), R2 // nsec
MOVW R4, R13 // Restore real SP
- MOVW $0, R4
+ // Restore vdsoPC, vdsoSP
+ // We don't worry about being signaled between the two stores.
+ // If we are not in a signal handler, we'll restore vdsoSP to 0,
+ // and no one will care about vdsoPC. If we are in a signal handler,
+ // we cannot receive another signal.
+ MOVW 8(R13), R4
MOVW R4, m_vdsoSP(R5)
+ MOVW 4(R13), R4
+ MOVW R4, m_vdsoPC(R5)
MOVW $1000000000, R3
MULLU R0, R3, (R1, R0)
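The hunk is truncated here. For context, the $1000000000 constant and MULLU are the start of nanotime1's 64-bit nanosecond computation; the instructions that follow (not shown in this excerpt) add nsec into the 64-bit product with carry, since the code works on 32-bit register halves. A hypothetical Go equivalent of that arithmetic, just to make the intent explicit:

    package vdsosketch

    // nanos mirrors the tail of nanotime1: widen the seconds returned by
    // clock_gettime to nanoseconds and add the fractional part. The assembly
    // spreads this over a 32x32->64 multiply plus carry-aware adds.
    func nanos(sec, nsec int32) int64 {
    	return int64(sec)*1_000_000_000 + int64(nsec)
    }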