author	Michael Anthony Knyszek <mknyszek@google.com>	2023-08-03 20:53:52 +0000
committer	Than McIntosh <thanm@google.com>	2023-09-25 13:03:10 +0000
commit	0b6b0a275af2c60b922bf592833c47633bd8b6e5 (patch)
tree	3b127fb1220fbcd5a8285f75afaac2c71303dc33
parent	cd671a1180441309e88c7113c86157e91888ded7 (diff)
[release-branch.go1.21] runtime: always lock OS thread in debugcall
Right now debuggers like Delve rely on the new goroutine created to run
a debugcall function to run on the same thread it started on, up until
it hits itself with a SIGINT as part of the debugcall protocol. That's
all well and good, except debugCallWrap1 isn't particularly careful
about not growing the stack. For example, if the new goroutine happens
to have a stale preempt flag, then it's possible a stack growth will
cause a roundtrip into the scheduler, possibly causing the goroutine
to switch to another thread.

Previous attempts to just be more careful around debugCallWrap1 were
helpful, but insufficient. This change takes everything a step further
and always locks the debug call goroutine and the new goroutine it
creates to the OS thread.

For #61732.
Fixes #62509.

Change-Id: I038f3a4df30072833e27e6a5a1ec01806a32891f
Reviewed-on: https://go-review.googlesource.com/c/go/+/515637
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Alessandro Arzilli <alessandro.arzilli@gmail.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
(cherry picked from commit d9a4b24a1775c4c5baa4ce3005cb5af61346198e)
Reviewed-on: https://go-review.googlesource.com/c/go/+/526576
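For readers following along, the runtime-internal lockOSThread/unlockOSThread used in the patch below behave like the exported runtime.LockOSThread/UnlockOSThread: once locked, a goroutine stays wired to its OS thread until the lock is released. A minimal illustrative sketch of that pinning guarantee (not part of this patch; syscall.Gettid is Linux-only):

package main

import (
	"fmt"
	"runtime"
	"syscall"
)

func main() {
	done := make(chan struct{})
	go func() {
		defer close(done)

		// Wire this goroutine to its current OS thread. Until the
		// matching UnlockOSThread, the scheduler will not migrate it
		// to another thread, even across stack growth or preemption.
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()

		start := syscall.Gettid() // Linux-only: ID of the current OS thread
		for i := 0; i < 1000; i++ {
			runtime.Gosched() // invite the scheduler to reschedule us
			if tid := syscall.Gettid(); tid != start {
				fmt.Println("migrated from thread", start, "to", tid)
				return
			}
		}
		fmt.Println("stayed on thread", start, "for all iterations")
	}()
	<-done
}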
-rw-r--r--	src/runtime/debugcall.go	58
1 file changed, 30 insertions, 28 deletions
diff --git a/src/runtime/debugcall.go b/src/runtime/debugcall.go
index ea413bd0c5..e793545036 100644
--- a/src/runtime/debugcall.go
+++ b/src/runtime/debugcall.go
@@ -102,11 +102,18 @@ func debugCallCheck(pc uintptr) string {
//
//go:nosplit
func debugCallWrap(dispatch uintptr) {
- var lockedm bool
var lockedExt uint32
callerpc := getcallerpc()
gp := getg()
+ // Lock ourselves to the OS thread.
+ //
+ // Debuggers rely on us running on the same thread until we get to
+ // dispatch the function they asked us to.
+ //
+ // We're going to transfer this to the new G we just created.
+ lockOSThread()
+
// Create a new goroutine to execute the call on. Run this on
// the system stack to avoid growing our stack.
systemstack(func() {
@@ -121,27 +128,22 @@ func debugCallWrap(dispatch uintptr) {
}
newg.param = unsafe.Pointer(args)
- // If the current G is locked, then transfer that
- // locked-ness to the new goroutine.
- if gp.lockedm != 0 {
- // Save lock state to restore later.
- mp := gp.m
- if mp != gp.lockedm.ptr() {
- throw("inconsistent lockedm")
- }
-
- lockedm = true
- lockedExt = mp.lockedExt
-
- // Transfer external lock count to internal so
- // it can't be unlocked from the debug call.
- mp.lockedInt++
- mp.lockedExt = 0
-
- mp.lockedg.set(newg)
- newg.lockedm.set(mp)
- gp.lockedm = 0
+ // Transfer locked-ness to the new goroutine.
+ // Save lock state to restore later.
+ mp := gp.m
+ if mp != gp.lockedm.ptr() {
+ throw("inconsistent lockedm")
}
+ // Save the external lock count and clear it so
+ // that it can't be unlocked from the debug call.
+ // Note: we already locked internally to the thread,
+ // so if we were locked before we're still locked now.
+ lockedExt = mp.lockedExt
+ mp.lockedExt = 0
+
+ mp.lockedg.set(newg)
+ newg.lockedm.set(mp)
+ gp.lockedm = 0
// Mark the calling goroutine as being at an async
// safe-point, since it has a few conservative frames
@@ -177,13 +179,13 @@ func debugCallWrap(dispatch uintptr) {
// We'll resume here when the call returns.
// Restore locked state.
- if lockedm {
- mp := gp.m
- mp.lockedExt = lockedExt
- mp.lockedInt--
- mp.lockedg.set(gp)
- gp.lockedm.set(mp)
- }
+ mp := gp.m
+ mp.lockedExt = lockedExt
+ mp.lockedg.set(gp)
+ gp.lockedm.set(mp)
+
+ // Undo the lockOSThread we did earlier.
+ unlockOSThread()
gp.asyncSafePoint = false
}
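One detail worth calling out from the hunks above: OS-thread locks nest, which is why the patch can take its own lock up front and later release only that lock ("we already locked internally to the thread, so if we were locked before we're still locked now"). A rough sketch of the same nesting behavior using the exported API (illustrative only; doDebugCallLike is a made-up stand-in, not runtime code):

package main

import (
	"fmt"
	"runtime"
)

// doDebugCallLike is a hypothetical stand-in for the debug call path:
// it takes its own OS-thread lock and releases only that lock, leaving
// any lock the caller already held untouched.
func doDebugCallLike() {
	runtime.LockOSThread()         // inner lock, analogous to the patch's lockOSThread()
	defer runtime.UnlockOSThread() // undoes only this inner lock
	// ... the dispatched function would run here ...
}

func main() {
	runtime.LockOSThread() // outer lock, held by the caller
	doDebugCallLike()
	// Still wired to the same OS thread here: the UnlockOSThread inside
	// doDebugCallLike paired with its own LockOSThread, not with ours.
	fmt.Println("outer lock still held")
	runtime.UnlockOSThread()
}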