about summary refs log tree commit diff
path: root/src/runtime/proc.go
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime/proc.go')
-rw-r--r-- src/runtime/proc.go 144
1 files changed, 71 insertions, 73 deletions
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 4535f62053..b72194c76a 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -2351,6 +2351,11 @@ func handoffp(_p_ *p) {
startm(_p_, false)
return
}
+ // if there's trace work to do, start it straight away
+ if (trace.enabled || trace.shutdown) && traceReaderAvailable() {
+ startm(_p_, false)
+ return
+ }
// if it has GC work, start it straight away
if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
startm(_p_, false)
@@ -2535,7 +2540,9 @@ func execute(gp *g, inheritTime bool) {
// Finds a runnable goroutine to execute.
// Tries to steal from other P's, get g from local or global queue, poll network.
-func findrunnable() (gp *g, inheritTime bool) {
+// tryWakeP indicates that the returned goroutine is not normal (GC worker, trace
+// reader) so the caller should try to wake a P.
+func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
_g_ := getg()
// The conditions here and in handoffp must agree: if
@@ -2552,8 +2559,43 @@ top:
runSafePointFn()
}
+ // now and pollUntil are saved for work stealing later,
+ // which may steal timers. It's important that between now
+ // and then, nothing blocks, so these numbers remain mostly
+ // relevant.
now, pollUntil, _ := checkTimers(_p_, 0)
+ // Try to schedule the trace reader.
+ if trace.enabled || trace.shutdown {
+ gp = traceReader()
+ if gp != nil {
+ casgstatus(gp, _Gwaiting, _Grunnable)
+ traceGoUnpark(gp, 0)
+ return gp, false, true
+ }
+ }
+
+ // Try to schedule a GC worker.
+ if gcBlackenEnabled != 0 {
+ gp = gcController.findRunnableGCWorker(_p_)
+ if gp != nil {
+ return gp, false, true
+ }
+ }
+
+ // Check the global runnable queue once in a while to ensure fairness.
+ // Otherwise two goroutines can completely occupy the local runqueue
+ // by constantly respawning each other.
+ if _p_.schedtick%61 == 0 && sched.runqsize > 0 {
+ lock(&sched.lock)
+ gp = globrunqget(_p_, 1)
+ unlock(&sched.lock)
+ if gp != nil {
+ return gp, false, false
+ }
+ }
+
+ // Wake up the finalizer G.
if fingwait && fingwake {
if gp := wakefing(); gp != nil {
ready(gp, 0, true)
@@ -2565,7 +2607,7 @@ top:
// local runq
if gp, inheritTime := runqget(_p_); gp != nil {
- return gp, inheritTime
+ return gp, inheritTime, false
}
// global runq
@@ -2574,7 +2616,7 @@ top:
gp := globrunqget(_p_, 0)
unlock(&sched.lock)
if gp != nil {
- return gp, false
+ return gp, false, false
}
}
@@ -2593,7 +2635,7 @@ top:
if trace.enabled {
traceGoUnpark(gp, 0)
}
- return gp, false
+ return gp, false, false
}
}
@@ -2613,7 +2655,7 @@ top:
now = tnow
if gp != nil {
// Successfully stole.
- return gp, inheritTime
+ return gp, inheritTime, false
}
if newWork {
// There may be new timer or GC work; restart to
@@ -2629,9 +2671,8 @@ top:
// We have nothing to do.
//
// If we're in the GC mark phase, can safely scan and blacken objects,
- // and have work to do, run idle-time marking rather than give up the
- // P.
- if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
+ // and have work to do, run idle-time marking rather than give up the P.
+ if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) && gcController.addIdleMarkWorker() {
node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
if node != nil {
_p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
@@ -2640,8 +2681,9 @@ top:
if trace.enabled {
traceGoUnpark(gp, 0)
}
- return gp, false
+ return gp, false, false
}
+ gcController.removeIdleMarkWorker()
}
// wasm only:
@@ -2654,7 +2696,7 @@ top:
if trace.enabled {
traceGoUnpark(gp, 0)
}
- return gp, false
+ return gp, false, false
}
if otherReady {
goto top
@@ -2679,7 +2721,7 @@ top:
if sched.runqsize != 0 {
gp := globrunqget(_p_, 0)
unlock(&sched.lock)
- return gp, false
+ return gp, false, false
}
if releasep() != _p_ {
throw("findrunnable: wrong p")
@@ -2742,7 +2784,7 @@ top:
if trace.enabled {
traceGoUnpark(gp, 0)
}
- return gp, false
+ return gp, false, false
}
// Finally, check for timer creation or expiry concurrently with
@@ -2800,7 +2842,7 @@ top:
if trace.enabled {
traceGoUnpark(gp, 0)
}
- return gp, false
+ return gp, false, false
}
if wasSpinning {
_g_.m.spinning = true
@@ -2959,8 +3001,12 @@ func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64
// returned. The returned P has not been wired yet.
func checkIdleGCNoP() (*p, *g) {
// N.B. Since we have no P, gcBlackenEnabled may change at any time; we
- // must check again after acquiring a P.
- if atomic.Load(&gcBlackenEnabled) == 0 {
+ // must check again after acquiring a P. As an optimization, we also check
+ // if an idle mark worker is needed at all. This is OK here, because if we
+ // observe that one isn't needed, at least one is currently running. Even if
+ // it stops running, its own journey into the scheduler should schedule it
+ // again, if need be (at which point, this check will pass, if relevant).
+ if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
return nil, nil
}
if !gcMarkWorkAvailable(nil) {
@@ -2991,9 +3037,8 @@ func checkIdleGCNoP() (*p, *g) {
return nil, nil
}
- // Now that we own a P, gcBlackenEnabled can't change (as it requires
- // STW).
- if gcBlackenEnabled == 0 {
+ // Now that we own a P, gcBlackenEnabled can't change (as it requires STW).
+ if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
pidleput(pp)
unlock(&sched.lock)
return nil, nil
@@ -3003,6 +3048,7 @@ func checkIdleGCNoP() (*p, *g) {
if node == nil {
pidleput(pp)
unlock(&sched.lock)
+ gcController.removeIdleMarkWorker()
return nil, nil
}
@@ -3143,62 +3189,14 @@ top:
pp := _g_.m.p.ptr()
pp.preempt = false
- if sched.gcwaiting != 0 {
- gcstopm()
- goto top
- }
- if pp.runSafePointFn != 0 {
- runSafePointFn()
- }
-
- // Sanity check: if we are spinning, the run queue should be empty.
+ // Safety check: if we are spinning, the run queue should be empty.
// Check this before calling checkTimers, as that might call
// goready to put a ready goroutine on the local run queue.
if _g_.m.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
throw("schedule: spinning with local work")
}
- checkTimers(pp, 0)
-
- var gp *g
- var inheritTime bool
-
- // Normal goroutines will check for need to wakeP in ready,
- // but GCworkers and tracereaders will not, so the check must
- // be done here instead.
- tryWakeP := false
- if trace.enabled || trace.shutdown {
- gp = traceReader()
- if gp != nil {
- casgstatus(gp, _Gwaiting, _Grunnable)
- traceGoUnpark(gp, 0)
- tryWakeP = true
- }
- }
- if gp == nil && gcBlackenEnabled != 0 {
- gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
- if gp != nil {
- tryWakeP = true
- }
- }
- if gp == nil {
- // Check the global runnable queue once in a while to ensure fairness.
- // Otherwise two goroutines can completely occupy the local runqueue
- // by constantly respawning each other.
- if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
- lock(&sched.lock)
- gp = globrunqget(_g_.m.p.ptr(), 1)
- unlock(&sched.lock)
- }
- }
- if gp == nil {
- gp, inheritTime = runqget(_g_.m.p.ptr())
- // We can see gp != nil here even if the M is spinning,
- // if checkTimers added a local goroutine via goready.
- }
- if gp == nil {
- gp, inheritTime = findrunnable() // blocks until work is available
- }
+ gp, inheritTime, tryWakeP := findRunnable() // blocks until work is available
// This thread is going to run a goroutine and is not spinning anymore,
// so if it was marked as spinning we need to reset it now and potentially
@@ -3658,7 +3656,7 @@ func reentersyscall(pc, sp uintptr) {
// Standard syscall entry used by the go syscall library and normal cgo calls.
//
-// This is exported via linkname to assembly in the syscall package.
+// This is exported via linkname to assembly in the syscall package and x/sys.
//
//go:nosplit
//go:linkname entersyscall
@@ -5184,9 +5182,9 @@ func sysmon() {
startm(nil, false)
}
}
- if atomic.Load(&scavenge.sysmonWake) != 0 {
+ if scavenger.sysmonWake.Load() != 0 {
// Kick the scavenger awake if someone requested it.
- wakeScavenger()
+ scavenger.wake()
}
// retake P's blocked in syscalls
// and preempt long running G's
@@ -5904,10 +5902,10 @@ func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool
// between different Ps.
// A sync chan send/recv takes ~50ns as of time of
// writing, so 3us gives ~50x overshoot.
- if GOOS != "windows" {
+ if GOOS != "windows" && GOOS != "openbsd" {
usleep(3)
} else {
- // On windows system timer granularity is
+ // On some platforms system timer granularity is
// 1-15ms, which is way too much for this
// optimization. So just yield.
osyield()