Diffstat (limited to 'src/runtime/proc.go')
 -rw-r--r--  src/runtime/proc.go | 1028
 1 file changed, 511 insertions(+), 517 deletions(-)
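
Nearly every hunk below is part of one mechanical rename: the result of getg() is no longer stashed in _g_, but in a variable named for what the function actually needs. A minimal sketch of the convention, valid only inside package runtime and purely illustrative (not a hunk from this diff):

	gp := getg()     // gp: the current goroutine (*g)
	mp := getg().m   // mp: the current thread (*m), used when only the M matters
	pp := mp.p.ptr() // pp: the P currently wired to that M (*p)
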
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 3991a48b10..0b3d90c5b2 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -143,11 +143,11 @@ var initSigmask sigset
// The main goroutine.
func main() {
- g := getg()
+ mp := getg().m
// Racectx of m0->g0 is used only as the parent of the main goroutine.
// It must not be used for anything else.
- g.m.g0.racectx = 0
+ mp.g0.racectx = 0
// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
// Using decimal instead of binary GB and MB because
@@ -180,7 +180,7 @@ func main() {
// to preserve the lock.
lockOSThread()
- if g.m != &m0 {
+ if mp != &m0 {
throw("runtime.main not on m0")
}
@@ -678,9 +678,9 @@ func schedinit() {
// raceinit must be the first call to race detector.
// In particular, it must be done before mallocinit below calls racemapshadow.
- _g_ := getg()
+ gp := getg()
if raceenabled {
- _g_.racectx, raceprocctx0 = raceinit()
+ gp.racectx, raceprocctx0 = raceinit()
}
sched.maxmcount = 10000
@@ -694,14 +694,14 @@ func schedinit() {
cpuinit() // must run before alginit
alginit() // maps, hash, fastrand must not be used before this call
fastrandinit() // must run before mcommoninit
- mcommoninit(_g_.m, -1)
+ mcommoninit(gp.m, -1)
modulesinit() // provides activeModules
typelinksinit() // uses maps, activeModules
itabsinit() // uses activeModules
stkobjinit() // must run before GC starts
- sigsave(&_g_.m.sigmask)
- initSigmask = _g_.m.sigmask
+ sigsave(&gp.m.sigmask)
+ initSigmask = gp.m.sigmask
if offset := unsafe.Offsetof(sched.timeToRun); offset%8 != 0 {
println(offset)
@@ -733,8 +733,8 @@ func schedinit() {
if debug.cgocheck > 1 {
writeBarrier.cgo = true
writeBarrier.enabled = true
- for _, p := range allp {
- p.wbBuf.reset()
+ for _, pp := range allp {
+ pp.wbBuf.reset()
}
}
@@ -751,9 +751,9 @@ func schedinit() {
}
func dumpgstatus(gp *g) {
- _g_ := getg()
- print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
- print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
+ thisg := getg()
+ print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
+ print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
}
// sched.lock must be held.
@@ -784,10 +784,10 @@ func mReserveID() int64 {
// Pre-allocated ID may be passed as 'id', or omitted by passing -1.
func mcommoninit(mp *m, id int64) {
- _g_ := getg()
+ gp := getg()
// g0 stack won't make sense for user (and is not necessary unwindable).
- if _g_ != _g_.m.g0 {
+ if gp != gp.m.g0 {
callers(1, mp.createstack[:])
}
@@ -848,7 +848,6 @@ func ready(gp *g, traceskip int, next bool) {
status := readgstatus(gp)
// Mark runnable.
- _g_ := getg()
mp := acquirem() // disable preemption because it can be holding p in a local var
if status&^_Gscan != _Gwaiting {
dumpgstatus(gp)
@@ -857,7 +856,7 @@ func ready(gp *g, traceskip int, next bool) {
// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
casgstatus(gp, _Gwaiting, _Grunnable)
- runqput(_g_.m.p.ptr(), gp, next)
+ runqput(mp.p.ptr(), gp, next)
wakep()
releasem(mp)
}
@@ -1177,11 +1176,11 @@ var gcsema uint32 = 1
// Holding worldsema causes any other goroutines invoking
// stopTheWorld to block.
func stopTheWorldWithSema() {
- _g_ := getg()
+ gp := getg()
// If we hold a lock, then we won't be able to stop another M
// that is blocked trying to acquire the lock.
- if _g_.m.locks > 0 {
+ if gp.m.locks > 0 {
throw("stopTheWorld: holding locks")
}
@@ -1190,28 +1189,28 @@ func stopTheWorldWithSema() {
atomic.Store(&sched.gcwaiting, 1)
preemptall()
// stop current P
- _g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
+ gp.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
sched.stopwait--
// try to retake all P's in Psyscall status
- for _, p := range allp {
- s := p.status
- if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
+ for _, pp := range allp {
+ s := pp.status
+ if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
if trace.enabled {
- traceGoSysBlock(p)
- traceProcStop(p)
+ traceGoSysBlock(pp)
+ traceProcStop(pp)
}
- p.syscalltick++
+ pp.syscalltick++
sched.stopwait--
}
}
// stop idle P's
now := nanotime()
for {
- p, _ := pidleget(now)
- if p == nil {
+ pp, _ := pidleget(now)
+ if pp == nil {
break
}
- p.status = _Pgcstop
+ pp.status = _Pgcstop
sched.stopwait--
}
wait := sched.stopwait > 0
@@ -1234,8 +1233,8 @@ func stopTheWorldWithSema() {
if sched.stopwait != 0 {
bad = "stopTheWorld: not stopped (stopwait != 0)"
} else {
- for _, p := range allp {
- if p.status != _Pgcstop {
+ for _, pp := range allp {
+ if pp.status != _Pgcstop {
bad = "stopTheWorld: not stopped (status != _Pgcstop)"
}
}
@@ -1354,9 +1353,9 @@ func mstart()
//go:nosplit
//go:nowritebarrierrec
func mstart0() {
- _g_ := getg()
+ gp := getg()
- osStack := _g_.stack.lo == 0
+ osStack := gp.stack.lo == 0
if osStack {
// Initialize stack bounds from system stack.
// Cgo may have left stack size in stack.hi.
@@ -1366,25 +1365,25 @@ func mstart0() {
// We set hi to &size, but there are things above
// it. The 1024 is supposed to compensate this,
// but is somewhat arbitrary.
- size := _g_.stack.hi
+ size := gp.stack.hi
if size == 0 {
size = 8192 * sys.StackGuardMultiplier
}
- _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
- _g_.stack.lo = _g_.stack.hi - size + 1024
+ gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
+ gp.stack.lo = gp.stack.hi - size + 1024
}
// Initialize stack guard so that we can start calling regular
// Go code.
- _g_.stackguard0 = _g_.stack.lo + _StackGuard
+ gp.stackguard0 = gp.stack.lo + _StackGuard
// This is the g0, so we can also call go:systemstack
// functions, which check stackguard1.
- _g_.stackguard1 = _g_.stackguard0
+ gp.stackguard1 = gp.stackguard0
mstart1()
// Exit this thread.
if mStackIsSystemAllocated() {
// Windows, Solaris, illumos, Darwin, AIX and Plan 9 always system-allocate
- // the stack, but put it in _g_.stack before mstart,
+ // the stack, but put it in gp.stack before mstart,
// so the logic above hasn't set osStack yet.
osStack = true
}
@@ -1396,9 +1395,9 @@ func mstart0() {
//
//go:noinline
func mstart1() {
- _g_ := getg()
+ gp := getg()
- if _g_ != _g_.m.g0 {
+ if gp != gp.m.g0 {
throw("bad runtime·mstart")
}
@@ -1408,26 +1407,26 @@ func mstart1() {
// so other calls can reuse the current frame.
// And goexit0 does a gogo that needs to return from mstart1
// and let mstart0 exit the thread.
- _g_.sched.g = guintptr(unsafe.Pointer(_g_))
- _g_.sched.pc = getcallerpc()
- _g_.sched.sp = getcallersp()
+ gp.sched.g = guintptr(unsafe.Pointer(gp))
+ gp.sched.pc = getcallerpc()
+ gp.sched.sp = getcallersp()
asminit()
minit()
// Install signal handlers; after minit so that minit can
// prepare the thread to be able to handle the signals.
- if _g_.m == &m0 {
+ if gp.m == &m0 {
mstartm0()
}
- if fn := _g_.m.mstartfn; fn != nil {
+ if fn := gp.m.mstartfn; fn != nil {
fn()
}
- if _g_.m != &m0 {
- acquirep(_g_.m.nextp.ptr())
- _g_.m.nextp = 0
+ if gp.m != &m0 {
+ acquirep(gp.m.nextp.ptr())
+ gp.m.nextp = 0
}
schedule()
}
@@ -1461,7 +1460,7 @@ func mPark() {
// mexit tears down and exits the current thread.
//
// Don't call this directly to exit the thread, since it must run at
-// the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to
+// the top of the thread stack. Instead, use gogo(&gp.m.g0.sched) to
// unwind the stack to the point that exits the thread.
//
// It is entered with m.p != nil, so write barriers are allowed. It
@@ -1469,10 +1468,9 @@ func mPark() {
//
//go:yeswritebarrierrec
func mexit(osStack bool) {
- g := getg()
- m := g.m
+ mp := getg().m
- if m == &m0 {
+ if mp == &m0 {
// This is the main thread. Just wedge it.
//
// On Linux, exiting the main thread puts the process
@@ -1497,20 +1495,20 @@ func mexit(osStack bool) {
unminit()
// Free the gsignal stack.
- if m.gsignal != nil {
- stackfree(m.gsignal.stack)
+ if mp.gsignal != nil {
+ stackfree(mp.gsignal.stack)
// On some platforms, when calling into VDSO (e.g. nanotime)
// we store our g on the gsignal stack, if there is one.
// Now the stack is freed, unlink it from the m, so we
// won't write to it when calling VDSO code.
- m.gsignal = nil
+ mp.gsignal = nil
}
// Remove m from allm.
lock(&sched.lock)
for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
- if *pprev == m {
- *pprev = m.alllink
+ if *pprev == mp {
+ *pprev = mp.alllink
goto found
}
}
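
The loop just above walks allm through a pointer to the link field (pprev), so unlinking the head of the list and unlinking an interior m use the same assignment. A standalone sketch of that pattern with invented types, not runtime code:

	type node struct {
		next *node
	}

	// remove unlinks target from the singly linked list rooted at *head.
	// Writing through pprev handles head and interior nodes uniformly.
	func remove(head **node, target *node) bool {
		for pprev := head; *pprev != nil; pprev = &(*pprev).next {
			if *pprev == target {
				*pprev = target.next
				return true
			}
		}
		return false
	}
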
@@ -1521,17 +1519,17 @@ found:
//
// If this is using an OS stack, the OS will free it
// so there's no need for reaping.
- atomic.Store(&m.freeWait, 1)
+ atomic.Store(&mp.freeWait, 1)
// Put m on the free list, though it will not be reaped until
// freeWait is 0. Note that the free list must not be linked
// through alllink because some functions walk allm without
// locking, so may be using alllink.
- m.freelink = sched.freem
- sched.freem = m
+ mp.freelink = sched.freem
+ sched.freem = mp
}
unlock(&sched.lock)
- atomic.Xadd64(&ncgocall, int64(m.ncgocall))
+ atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
// Release the P.
handoffp(releasep())
@@ -1548,14 +1546,14 @@ found:
if GOOS == "darwin" || GOOS == "ios" {
// Make sure pendingPreemptSignals is correct when an M exits.
// For #41702.
- if atomic.Load(&m.signalPending) != 0 {
+ if atomic.Load(&mp.signalPending) != 0 {
atomic.Xadd(&pendingPreemptSignals, -1)
}
}
// Destroy all allocated resources. After this is called, we may no
// longer take any locks.
- mdestroy(m)
+ mdestroy(mp)
if osStack {
// Return from mstart and let the system thread
@@ -1567,7 +1565,7 @@ found:
// return to. Exit the thread directly. exitThread will clear
// m.freeWait when it's done with the stack and the m can be
// reaped.
- exitThread(&m.freeWait)
+ exitThread(&mp.freeWait)
}
// forEachP calls fn(p) for every P p when p reaches a GC safe point.
@@ -1583,7 +1581,7 @@ found:
//go:systemstack
func forEachP(fn func(*p)) {
mp := acquirem()
- _p_ := getg().m.p.ptr()
+ pp := getg().m.p.ptr()
lock(&sched.lock)
if sched.safePointWait != 0 {
@@ -1593,9 +1591,9 @@ func forEachP(fn func(*p)) {
sched.safePointFn = fn
// Ask all Ps to run the safe point function.
- for _, p := range allp {
- if p != _p_ {
- atomic.Store(&p.runSafePointFn, 1)
+ for _, p2 := range allp {
+ if p2 != pp {
+ atomic.Store(&p2.runSafePointFn, 1)
}
}
preemptall()
@@ -1617,19 +1615,19 @@ func forEachP(fn func(*p)) {
unlock(&sched.lock)
// Run fn for the current P.
- fn(_p_)
+ fn(pp)
// Force Ps currently in _Psyscall into _Pidle and hand them
// off to induce safe point function execution.
- for _, p := range allp {
- s := p.status
- if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
+ for _, p2 := range allp {
+ s := p2.status
+ if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
if trace.enabled {
- traceGoSysBlock(p)
- traceProcStop(p)
+ traceGoSysBlock(p2)
+ traceProcStop(p2)
}
- p.syscalltick++
- handoffp(p)
+ p2.syscalltick++
+ handoffp(p2)
}
}
@@ -1650,8 +1648,8 @@ func forEachP(fn func(*p)) {
if sched.safePointWait != 0 {
throw("forEachP: not done")
}
- for _, p := range allp {
- if p.runSafePointFn != 0 {
+ for _, p2 := range allp {
+ if p2.runSafePointFn != 0 {
throw("forEachP: P did not run fn")
}
}
@@ -1707,20 +1705,20 @@ type cgothreadstart struct {
// id is optional pre-allocated m ID. Omit by passing -1.
//
// This function is allowed to have write barriers even if the caller
-// isn't because it borrows _p_.
+// isn't because it borrows pp.
//
//go:yeswritebarrierrec
-func allocm(_p_ *p, fn func(), id int64) *m {
+func allocm(pp *p, fn func(), id int64) *m {
allocmLock.rlock()
- // The caller owns _p_, but we may borrow (i.e., acquirep) it. We must
+ // The caller owns pp, but we may borrow (i.e., acquirep) it. We must
// disable preemption to ensure it is not stolen, which would make the
// caller lose ownership.
acquirem()
- _g_ := getg()
- if _g_.m.p == 0 {
- acquirep(_p_) // temporarily borrow p for mallocs in this function
+ gp := getg()
+ if gp.m.p == 0 {
+ acquirep(pp) // temporarily borrow p for mallocs in this function
}
// Release the free M list. We need to do this somewhere and
@@ -1761,11 +1759,11 @@ func allocm(_p_ *p, fn func(), id int64) *m {
}
mp.g0.m = mp
- if _p_ == _g_.m.p.ptr() {
+ if pp == gp.m.p.ptr() {
releasep()
}
- releasem(_g_.m)
+ releasem(gp.m)
allocmLock.runlock()
return mp
}
@@ -1859,10 +1857,10 @@ func needm() {
// scheduling stack is, but we assume there's at least 32 kB,
// which is more than enough for us.
setg(mp.g0)
- _g_ := getg()
- _g_.stack.hi = getcallersp() + 1024
- _g_.stack.lo = getcallersp() - 32*1024
- _g_.stackguard0 = _g_.stack.lo + _StackGuard
+ gp := getg()
+ gp.stack.hi = getcallersp() + 1024
+ gp.stack.lo = getcallersp() - 32*1024
+ gp.stackguard0 = gp.stack.lo + _StackGuard
// Initialize this thread to use the m.
asminit()
@@ -2085,7 +2083,7 @@ var newmHandoff struct {
// id is optional pre-allocated m ID. Omit by passing -1.
//
//go:nowritebarrierrec
-func newm(fn func(), _p_ *p, id int64) {
+func newm(fn func(), pp *p, id int64) {
// allocm adds a new M to allm, but they do not start until created by
// the OS in newm1 or the template thread.
//
@@ -2098,8 +2096,8 @@ func newm(fn func(), _p_ *p, id int64) {
// start.
acquirem()
- mp := allocm(_p_, fn, id)
- mp.nextp.set(_p_)
+ mp := allocm(pp, fn, id)
+ mp.nextp.set(pp)
mp.sigmask = initSigmask
if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
// We're on a locked M or a thread that may have been
@@ -2221,24 +2219,24 @@ func templateThread() {
// Stops execution of the current m until new work is available.
// Returns with acquired P.
func stopm() {
- _g_ := getg()
+ gp := getg()
- if _g_.m.locks != 0 {
+ if gp.m.locks != 0 {
throw("stopm holding locks")
}
- if _g_.m.p != 0 {
+ if gp.m.p != 0 {
throw("stopm holding p")
}
- if _g_.m.spinning {
+ if gp.m.spinning {
throw("stopm spinning")
}
lock(&sched.lock)
- mput(_g_.m)
+ mput(gp.m)
unlock(&sched.lock)
mPark()
- acquirep(_g_.m.nextp.ptr())
- _g_.m.nextp = 0
+ acquirep(gp.m.nextp.ptr())
+ gp.m.nextp = 0
}
func mspinning() {
@@ -2258,7 +2256,7 @@ func mspinning() {
// Must not have write barriers because this may be called without a P.
//
//go:nowritebarrierrec
-func startm(_p_ *p, spinning bool) {
+func startm(pp *p, spinning bool) {
// Disable preemption.
//
// Every owned P must have an owner that will eventually stop it in the
@@ -2277,9 +2275,9 @@ func startm(_p_ *p, spinning bool) {
// disable preemption before acquiring a P from pidleget below.
mp := acquirem()
lock(&sched.lock)
- if _p_ == nil {
- _p_, _ = pidleget(0)
- if _p_ == nil {
+ if pp == nil {
+ pp, _ = pidleget(0)
+ if pp == nil {
unlock(&sched.lock)
if spinning {
// The caller incremented nmspinning, but there are no idle Ps,
@@ -2314,8 +2312,8 @@ func startm(_p_ *p, spinning bool) {
// The caller incremented nmspinning, so set m.spinning in the new M.
fn = mspinning
}
- newm(fn, _p_, id)
- // Ownership transfer of _p_ committed by start in newm.
+ newm(fn, pp, id)
+ // Ownership transfer of pp committed by start in newm.
// Preemption is now safe.
releasem(mp)
return
@@ -2327,14 +2325,14 @@ func startm(_p_ *p, spinning bool) {
if nmp.nextp != 0 {
throw("startm: m has p")
}
- if spinning && !runqempty(_p_) {
+ if spinning && !runqempty(pp) {
throw("startm: p has runnable gs")
}
// The caller incremented nmspinning, so set m.spinning in the new M.
nmp.spinning = spinning
- nmp.nextp.set(_p_)
+ nmp.nextp.set(pp)
notewakeup(&nmp.park)
- // Ownership transfer of _p_ committed by wakeup. Preemption is now
+ // Ownership transfer of pp committed by wakeup. Preemption is now
// safe.
releasem(mp)
}
@@ -2343,34 +2341,34 @@ func startm(_p_ *p, spinning bool) {
// Always runs without a P, so write barriers are not allowed.
//
//go:nowritebarrierrec
-func handoffp(_p_ *p) {
+func handoffp(pp *p) {
// handoffp must start an M in any situation where
- // findrunnable would return a G to run on _p_.
+ // findrunnable would return a G to run on pp.
// if it has local work, start it straight away
- if !runqempty(_p_) || sched.runqsize != 0 {
- startm(_p_, false)
+ if !runqempty(pp) || sched.runqsize != 0 {
+ startm(pp, false)
return
}
// if there's trace work to do, start it straight away
if (trace.enabled || trace.shutdown) && traceReaderAvailable() {
- startm(_p_, false)
+ startm(pp, false)
return
}
// if it has GC work, start it straight away
- if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
- startm(_p_, false)
+ if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) {
+ startm(pp, false)
return
}
// no local work, check that there are no spinning/idle M's,
// otherwise our help is not required
if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
- startm(_p_, true)
+ startm(pp, true)
return
}
lock(&sched.lock)
if sched.gcwaiting != 0 {
- _p_.status = _Pgcstop
+ pp.status = _Pgcstop
sched.stopwait--
if sched.stopwait == 0 {
notewakeup(&sched.stopnote)
@@ -2378,8 +2376,8 @@ func handoffp(_p_ *p) {
unlock(&sched.lock)
return
}
- if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
- sched.safePointFn(_p_)
+ if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
+ sched.safePointFn(pp)
sched.safePointWait--
if sched.safePointWait == 0 {
notewakeup(&sched.safePointNote)
@@ -2387,21 +2385,21 @@ func handoffp(_p_ *p) {
}
if sched.runqsize != 0 {
unlock(&sched.lock)
- startm(_p_, false)
+ startm(pp, false)
return
}
// If this is the last running P and nobody is polling network,
// need to wakeup another M to poll network.
if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
unlock(&sched.lock)
- startm(_p_, false)
+ startm(pp, false)
return
}
// The scheduler lock cannot be held when calling wakeNetPoller below
// because wakeNetPoller may call wakep which may call startm.
- when := nobarrierWakeTime(_p_)
- pidleput(_p_, 0)
+ when := nobarrierWakeTime(pp)
+ pidleput(pp, 0)
unlock(&sched.lock)
if when != 0 {
@@ -2425,27 +2423,27 @@ func wakep() {
// Stops execution of the current m that is locked to a g until the g is runnable again.
// Returns with acquired P.
func stoplockedm() {
- _g_ := getg()
+ gp := getg()
- if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m {
+ if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
throw("stoplockedm: inconsistent locking")
}
- if _g_.m.p != 0 {
+ if gp.m.p != 0 {
// Schedule another M to run this p.
- _p_ := releasep()
- handoffp(_p_)
+ pp := releasep()
+ handoffp(pp)
}
incidlelocked(1)
// Wait until another thread schedules lockedg again.
mPark()
- status := readgstatus(_g_.m.lockedg.ptr())
+ status := readgstatus(gp.m.lockedg.ptr())
if status&^_Gscan != _Grunnable {
print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
- dumpgstatus(_g_.m.lockedg.ptr())
+ dumpgstatus(gp.m.lockedg.ptr())
throw("stoplockedm: not runnable")
}
- acquirep(_g_.m.nextp.ptr())
- _g_.m.nextp = 0
+ acquirep(gp.m.nextp.ptr())
+ gp.m.nextp = 0
}
// Schedules the locked m to run the locked gp.
@@ -2453,10 +2451,8 @@ func stoplockedm() {
//
//go:nowritebarrierrec
func startlockedm(gp *g) {
- _g_ := getg()
-
mp := gp.lockedm.ptr()
- if mp == _g_.m {
+ if mp == getg().m {
throw("startlockedm: locked to me")
}
if mp.nextp != 0 {
@@ -2464,8 +2460,8 @@ func startlockedm(gp *g) {
}
// directly handoff current P to the locked m
incidlelocked(-1)
- _p_ := releasep()
- mp.nextp.set(_p_)
+ pp := releasep()
+ mp.nextp.set(pp)
notewakeup(&mp.park)
stopm()
}
@@ -2473,22 +2469,22 @@ func startlockedm(gp *g) {
// Stops the current m for stopTheWorld.
// Returns when the world is restarted.
func gcstopm() {
- _g_ := getg()
+ gp := getg()
if sched.gcwaiting == 0 {
throw("gcstopm: not waiting for gc")
}
- if _g_.m.spinning {
- _g_.m.spinning = false
+ if gp.m.spinning {
+ gp.m.spinning = false
// OK to just drop nmspinning here,
// startTheWorld will unpark threads as necessary.
if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
throw("gcstopm: negative nmspinning")
}
}
- _p_ := releasep()
+ pp := releasep()
lock(&sched.lock)
- _p_.status = _Pgcstop
+ pp.status = _Pgcstop
sched.stopwait--
if sched.stopwait == 0 {
notewakeup(&sched.stopnote)
@@ -2507,7 +2503,7 @@ func gcstopm() {
//
//go:yeswritebarrierrec
func execute(gp *g, inheritTime bool) {
- _g_ := getg()
+ mp := getg().m
if goroutineProfile.active {
// Make sure that gp has had its stack written out to the goroutine
@@ -2518,19 +2514,19 @@ func execute(gp *g, inheritTime bool) {
// Assign gp.m before entering _Grunning so running Gs have an
// M.
- _g_.m.curg = gp
- gp.m = _g_.m
+ mp.curg = gp
+ gp.m = mp
casgstatus(gp, _Grunnable, _Grunning)
gp.waitsince = 0
gp.preempt = false
gp.stackguard0 = gp.stack.lo + _StackGuard
if !inheritTime {
- _g_.m.p.ptr().schedtick++
+ mp.p.ptr().schedtick++
}
// Check whether the profiler needs to be turned on or off.
hz := sched.profilehz
- if _g_.m.profilehz != hz {
+ if mp.profilehz != hz {
setThreadCPUProfiler(hz)
}
@@ -2551,19 +2547,19 @@ func execute(gp *g, inheritTime bool) {
// tryWakeP indicates that the returned goroutine is not normal (GC worker, trace
// reader) so the caller should try to wake a P.
func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
- _g_ := getg()
+ mp := getg().m
// The conditions here and in handoffp must agree: if
// findrunnable would return a G to run, handoffp must start
// an M.
top:
- _p_ := _g_.m.p.ptr()
+ pp := mp.p.ptr()
if sched.gcwaiting != 0 {
gcstopm()
goto top
}
- if _p_.runSafePointFn != 0 {
+ if pp.runSafePointFn != 0 {
runSafePointFn()
}
@@ -2571,7 +2567,7 @@ top:
// which may steal timers. It's important that between now
// and then, nothing blocks, so these numbers remain mostly
// relevant.
- now, pollUntil, _ := checkTimers(_p_, 0)
+ now, pollUntil, _ := checkTimers(pp, 0)
// Try to schedule the trace reader.
if trace.enabled || trace.shutdown {
@@ -2585,7 +2581,7 @@ top:
// Try to schedule a GC worker.
if gcBlackenEnabled != 0 {
- gp, now = gcController.findRunnableGCWorker(_p_, now)
+ gp, now = gcController.findRunnableGCWorker(pp, now)
if gp != nil {
return gp, false, true
}
@@ -2594,9 +2590,9 @@ top:
// Check the global runnable queue once in a while to ensure fairness.
// Otherwise two goroutines can completely occupy the local runqueue
// by constantly respawning each other.
- if _p_.schedtick%61 == 0 && sched.runqsize > 0 {
+ if pp.schedtick%61 == 0 && sched.runqsize > 0 {
lock(&sched.lock)
- gp = globrunqget(_p_, 1)
+ gp = globrunqget(pp, 1)
unlock(&sched.lock)
if gp != nil {
return gp, false, false
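
The schedtick%61 check above is a fairness valve: once in a while the scheduler prefers the global run queue over the local one, so two goroutines that keep respawning each other on one P cannot starve globally queued work. A toy version of the same idea outside the runtime, with invented queue types:

	// pickTask normally serves the local queue, but every 61st tick it
	// checks the shared queue first so queued-behind work is not starved.
	func pickTask(tick uint64, local, global *[]int) (int, bool) {
		pop := func(q *[]int) (int, bool) {
			if len(*q) == 0 {
				return 0, false
			}
			t := (*q)[0]
			*q = (*q)[1:]
			return t, true
		}
		if tick%61 == 0 {
			if t, ok := pop(global); ok {
				return t, true
			}
		}
		if t, ok := pop(local); ok {
			return t, true
		}
		return pop(global)
	}
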
@@ -2614,14 +2610,14 @@ top:
}
// local runq
- if gp, inheritTime := runqget(_p_); gp != nil {
+ if gp, inheritTime := runqget(pp); gp != nil {
return gp, inheritTime, false
}
// global runq
if sched.runqsize != 0 {
lock(&sched.lock)
- gp := globrunqget(_p_, 0)
+ gp := globrunqget(pp, 0)
unlock(&sched.lock)
if gp != nil {
return gp, false, false
@@ -2653,9 +2649,9 @@ top:
// This is necessary to prevent excessive CPU consumption when
// GOMAXPROCS>>1 but the program parallelism is low.
procs := uint32(gomaxprocs)
- if _g_.m.spinning || 2*atomic.Load(&sched.nmspinning) < procs-atomic.Load(&sched.npidle) {
- if !_g_.m.spinning {
- _g_.m.spinning = true
+ if mp.spinning || 2*atomic.Load(&sched.nmspinning) < procs-atomic.Load(&sched.npidle) {
+ if !mp.spinning {
+ mp.spinning = true
atomic.Xadd(&sched.nmspinning, 1)
}
@@ -2680,10 +2676,10 @@ top:
//
// If we're in the GC mark phase, can safely scan and blacken objects,
// and have work to do, run idle-time marking rather than give up the P.
- if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) && gcController.addIdleMarkWorker() {
+ if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() {
node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
if node != nil {
- _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
+ pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
gp := node.gp.ptr()
casgstatus(gp, _Gwaiting, _Grunnable)
if trace.enabled {
@@ -2722,19 +2718,19 @@ top:
// return P and block
lock(&sched.lock)
- if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
+ if sched.gcwaiting != 0 || pp.runSafePointFn != 0 {
unlock(&sched.lock)
goto top
}
if sched.runqsize != 0 {
- gp := globrunqget(_p_, 0)
+ gp := globrunqget(pp, 0)
unlock(&sched.lock)
return gp, false, false
}
- if releasep() != _p_ {
+ if releasep() != pp {
throw("findrunnable: wrong p")
}
- now = pidleput(_p_, now)
+ now = pidleput(pp, now)
unlock(&sched.lock)
// Delicate dance: thread transitions from spinning to non-spinning
@@ -2757,9 +2753,9 @@ top:
// we also observe no idle Ps it is OK to skip unparking a new worker
// thread: the system is fully loaded so no spinning threads are required.
// Also see "Worker thread parking/unparking" comment at the top of the file.
- wasSpinning := _g_.m.spinning
- if _g_.m.spinning {
- _g_.m.spinning = false
+ wasSpinning := mp.spinning
+ if mp.spinning {
+ mp.spinning = false
if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
throw("findrunnable: negative nmspinning")
}
@@ -2771,23 +2767,23 @@ top:
// latency. See golang.org/issue/43997.
// Check all runqueues once again.
- _p_ = checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
- if _p_ != nil {
- acquirep(_p_)
- _g_.m.spinning = true
+ pp = checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
+ if pp != nil {
+ acquirep(pp)
+ mp.spinning = true
atomic.Xadd(&sched.nmspinning, 1)
goto top
}
// Check for idle-priority GC work again.
- _p_, gp = checkIdleGCNoP()
- if _p_ != nil {
- acquirep(_p_)
- _g_.m.spinning = true
+ pp, gp = checkIdleGCNoP()
+ if pp != nil {
+ acquirep(pp)
+ mp.spinning = true
atomic.Xadd(&sched.nmspinning, 1)
// Run the idle worker.
- _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
+ pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
casgstatus(gp, _Gwaiting, _Grunnable)
if trace.enabled {
traceGoUnpark(gp, 0)
@@ -2807,10 +2803,10 @@ top:
// Poll network until next timer.
if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
atomic.Store64(&sched.pollUntil, uint64(pollUntil))
- if _g_.m.p != 0 {
+ if mp.p != 0 {
throw("findrunnable: netpoll with p")
}
- if _g_.m.spinning {
+ if mp.spinning {
throw("findrunnable: netpoll with spinning")
}
// Refresh now.
@@ -2836,12 +2832,12 @@ top:
goto top
}
lock(&sched.lock)
- _p_, _ = pidleget(now)
+ pp, _ = pidleget(now)
unlock(&sched.lock)
- if _p_ == nil {
+ if pp == nil {
injectglist(&list)
} else {
- acquirep(_p_)
+ acquirep(pp)
if !list.empty() {
gp := list.pop()
injectglist(&list)
@@ -2852,7 +2848,7 @@ top:
return gp, false, false
}
if wasSpinning {
- _g_.m.spinning = true
+ mp.spinning = true
atomic.Xadd(&sched.nmspinning, 1)
}
goto top
@@ -3087,11 +3083,11 @@ func wakeNetPoller(when int64) {
}
func resetspinning() {
- _g_ := getg()
- if !_g_.m.spinning {
+ gp := getg()
+ if !gp.m.spinning {
throw("resetspinning: not a spinning m")
}
- _g_.m.spinning = false
+ gp.m.spinning = false
nmspinning := atomic.Xadd(&sched.nmspinning, -1)
if int32(nmspinning) < 0 {
throw("findrunnable: negative nmspinning")
@@ -3175,31 +3171,31 @@ func injectglist(glist *gList) {
// One round of scheduler: find a runnable goroutine and execute it.
// Never returns.
func schedule() {
- _g_ := getg()
+ mp := getg().m
- if _g_.m.locks != 0 {
+ if mp.locks != 0 {
throw("schedule: holding locks")
}
- if _g_.m.lockedg != 0 {
+ if mp.lockedg != 0 {
stoplockedm()
- execute(_g_.m.lockedg.ptr(), false) // Never returns.
+ execute(mp.lockedg.ptr(), false) // Never returns.
}
// We should not schedule away from a g that is executing a cgo call,
// since the cgo call is using the m's g0 stack.
- if _g_.m.incgo {
+ if mp.incgo {
throw("schedule: in cgo")
}
top:
- pp := _g_.m.p.ptr()
+ pp := mp.p.ptr()
pp.preempt = false
// Safety check: if we are spinning, the run queue should be empty.
// Check this before calling checkTimers, as that might call
// goready to put a ready goroutine on the local run queue.
- if _g_.m.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
+ if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
throw("schedule: spinning with local work")
}
@@ -3208,7 +3204,7 @@ top:
// This thread is going to run a goroutine and is not spinning anymore,
// so if it was marked as spinning we need to reset it now and potentially
// start a new spinning M.
- if _g_.m.spinning {
+ if mp.spinning {
resetspinning()
}
@@ -3252,10 +3248,10 @@ top:
// readied later, the caller can do other work but eventually should
// call schedule to restart the scheduling of goroutines on this m.
func dropg() {
- _g_ := getg()
+ gp := getg()
- setMNoWB(&_g_.m.curg.m, nil)
- setGNoWB(&_g_.m.curg, nil)
+ setMNoWB(&gp.m.curg.m, nil)
+ setGNoWB(&gp.m.curg, nil)
}
// checkTimers runs any timers for the P that are ready.
@@ -3331,19 +3327,19 @@ func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
// park continuation on g0.
func park_m(gp *g) {
- _g_ := getg()
+ mp := getg().m
if trace.enabled {
- traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
+ traceGoPark(mp.waittraceev, mp.waittraceskip)
}
casgstatus(gp, _Grunning, _Gwaiting)
dropg()
- if fn := _g_.m.waitunlockf; fn != nil {
- ok := fn(gp, _g_.m.waitlock)
- _g_.m.waitunlockf = nil
- _g_.m.waitlock = nil
+ if fn := mp.waitunlockf; fn != nil {
+ ok := fn(gp, mp.waitlock)
+ mp.waitunlockf = nil
+ mp.waitlock = nil
if !ok {
if trace.enabled {
traceGoUnpark(gp, 2)
@@ -3470,18 +3466,18 @@ func goexit1() {
// goexit continuation on g0.
func goexit0(gp *g) {
- _g_ := getg()
- _p_ := _g_.m.p.ptr()
+ mp := getg().m
+ pp := mp.p.ptr()
casgstatus(gp, _Grunning, _Gdead)
- gcController.addScannableStack(_p_, -int64(gp.stack.hi-gp.stack.lo))
+ gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
if isSystemGoroutine(gp, false) {
atomic.Xadd(&sched.ngsys, -1)
}
gp.m = nil
locked := gp.lockedm != 0
gp.lockedm = 0
- _g_.m.lockedg = 0
+ mp.lockedg = 0
gp.preemptStop = false
gp.paniconfault = false
gp._defer = nil // should be true already but just in case.
@@ -3505,15 +3501,15 @@ func goexit0(gp *g) {
dropg()
if GOARCH == "wasm" { // no threads yet on wasm
- gfput(_p_, gp)
+ gfput(pp, gp)
schedule() // never returns
}
- if _g_.m.lockedInt != 0 {
- print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
+ if mp.lockedInt != 0 {
+ print("invalid m->lockedInt = ", mp.lockedInt, "\n")
throw("internal lockOSThread error")
}
- gfput(_p_, gp)
+ gfput(pp, gp)
if locked {
// The goroutine may have locked this thread because
// it put it in an unusual kernel state. Kill it
@@ -3522,11 +3518,11 @@ func goexit0(gp *g) {
// Return to mstart, which will release the P and exit
// the thread.
if GOOS != "plan9" { // See golang.org/issue/22227.
- gogo(&_g_.m.g0.sched)
+ gogo(&mp.g0.sched)
} else {
// Clear lockedExt on plan9 since we may end up re-using
// this thread.
- _g_.m.lockedExt = 0
+ mp.lockedExt = 0
}
}
schedule()
@@ -3541,9 +3537,9 @@ func goexit0(gp *g) {
//go:nosplit
//go:nowritebarrierrec
func save(pc, sp uintptr) {
- _g_ := getg()
+ gp := getg()
- if _g_ == _g_.m.g0 || _g_ == _g_.m.gsignal {
+ if gp == gp.m.g0 || gp == gp.m.gsignal {
// m.g0.sched is special and must describe the context
// for exiting the thread. mstart1 writes to it directly.
// m.gsignal.sched should not be used at all.
@@ -3552,14 +3548,14 @@ func save(pc, sp uintptr) {
throw("save on system g not allowed")
}
- _g_.sched.pc = pc
- _g_.sched.sp = sp
- _g_.sched.lr = 0
- _g_.sched.ret = 0
+ gp.sched.pc = pc
+ gp.sched.sp = sp
+ gp.sched.lr = 0
+ gp.sched.ret = 0
// We need to ensure ctxt is zero, but can't have a write
// barrier here. However, it should always already be zero.
// Assert that.
- if _g_.sched.ctxt != nil {
+ if gp.sched.ctxt != nil {
badctxt()
}
}
@@ -3594,7 +3590,7 @@ func save(pc, sp uintptr) {
// when syscall returns we emit traceGoSysExit and when the goroutine starts running
// (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
// To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
-// we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick),
+// we remember current value of syscalltick in m (gp.m.syscalltick = gp.m.p.ptr().syscalltick),
// whoever emits traceGoSysBlock increments p.syscalltick afterwards;
// and we wait for the increment before emitting traceGoSysExit.
// Note that the increment is done even if tracing is not enabled,
@@ -3602,27 +3598,27 @@ func save(pc, sp uintptr) {
//
//go:nosplit
func reentersyscall(pc, sp uintptr) {
- _g_ := getg()
+ gp := getg()
// Disable preemption because during this function g is in Gsyscall status,
// but can have inconsistent g->sched, do not let GC observe it.
- _g_.m.locks++
+ gp.m.locks++
// Entersyscall must not call any function that might split/grow the stack.
// (See details in comment above.)
// Catch calls that might, by replacing the stack guard with something that
// will trip any stack check and leaving a flag to tell newstack to die.
- _g_.stackguard0 = stackPreempt
- _g_.throwsplit = true
+ gp.stackguard0 = stackPreempt
+ gp.throwsplit = true
// Leave SP around for GC and traceback.
save(pc, sp)
- _g_.syscallsp = sp
- _g_.syscallpc = pc
- casgstatus(_g_, _Grunning, _Gsyscall)
- if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
+ gp.syscallsp = sp
+ gp.syscallpc = pc
+ casgstatus(gp, _Grunning, _Gsyscall)
+ if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
systemstack(func() {
- print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
+ print("entersyscall inconsistent ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
throw("entersyscall")
})
}
@@ -3640,25 +3636,25 @@ func reentersyscall(pc, sp uintptr) {
save(pc, sp)
}
- if _g_.m.p.ptr().runSafePointFn != 0 {
+ if gp.m.p.ptr().runSafePointFn != 0 {
// runSafePointFn may stack split if run on this stack
systemstack(runSafePointFn)
save(pc, sp)
}
- _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
- _g_.sysblocktraced = true
- pp := _g_.m.p.ptr()
+ gp.m.syscalltick = gp.m.p.ptr().syscalltick
+ gp.sysblocktraced = true
+ pp := gp.m.p.ptr()
pp.m = 0
- _g_.m.oldp.set(pp)
- _g_.m.p = 0
+ gp.m.oldp.set(pp)
+ gp.m.p = 0
atomic.Store(&pp.status, _Psyscall)
if sched.gcwaiting != 0 {
systemstack(entersyscall_gcwait)
save(pc, sp)
}
- _g_.m.locks--
+ gp.m.locks--
}
// Standard syscall entry used by the go syscall library and normal cgo calls.
@@ -3681,16 +3677,16 @@ func entersyscall_sysmon() {
}
func entersyscall_gcwait() {
- _g_ := getg()
- _p_ := _g_.m.oldp.ptr()
+ gp := getg()
+ pp := gp.m.oldp.ptr()
lock(&sched.lock)
- if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
+ if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
if trace.enabled {
- traceGoSysBlock(_p_)
- traceProcStop(_p_)
+ traceGoSysBlock(pp)
+ traceProcStop(pp)
}
- _p_.syscalltick++
+ pp.syscalltick++
if sched.stopwait--; sched.stopwait == 0 {
notewakeup(&sched.stopnote)
}
@@ -3702,34 +3698,34 @@ func entersyscall_gcwait() {
//
//go:nosplit
func entersyscallblock() {
- _g_ := getg()
+ gp := getg()
- _g_.m.locks++ // see comment in entersyscall
- _g_.throwsplit = true
- _g_.stackguard0 = stackPreempt // see comment in entersyscall
- _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
- _g_.sysblocktraced = true
- _g_.m.p.ptr().syscalltick++
+ gp.m.locks++ // see comment in entersyscall
+ gp.throwsplit = true
+ gp.stackguard0 = stackPreempt // see comment in entersyscall
+ gp.m.syscalltick = gp.m.p.ptr().syscalltick
+ gp.sysblocktraced = true
+ gp.m.p.ptr().syscalltick++
// Leave SP around for GC and traceback.
pc := getcallerpc()
sp := getcallersp()
save(pc, sp)
- _g_.syscallsp = _g_.sched.sp
- _g_.syscallpc = _g_.sched.pc
- if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
+ gp.syscallsp = gp.sched.sp
+ gp.syscallpc = gp.sched.pc
+ if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
sp1 := sp
- sp2 := _g_.sched.sp
- sp3 := _g_.syscallsp
+ sp2 := gp.sched.sp
+ sp3 := gp.syscallsp
systemstack(func() {
- print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
+ print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
throw("entersyscallblock")
})
}
- casgstatus(_g_, _Grunning, _Gsyscall)
- if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
+ casgstatus(gp, _Grunning, _Gsyscall)
+ if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
systemstack(func() {
- print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
+ print("entersyscallblock inconsistent ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
throw("entersyscallblock")
})
}
@@ -3739,7 +3735,7 @@ func entersyscallblock() {
// Resave for traceback during blocked call.
save(getcallerpc(), getcallersp())
- _g_.m.locks--
+ gp.m.locks--
}
func entersyscallblock_handoff() {
@@ -3763,16 +3759,16 @@ func entersyscallblock_handoff() {
//go:nowritebarrierrec
//go:linkname exitsyscall
func exitsyscall() {
- _g_ := getg()
+ gp := getg()
- _g_.m.locks++ // see comment in entersyscall
- if getcallersp() > _g_.syscallsp {
+ gp.m.locks++ // see comment in entersyscall
+ if getcallersp() > gp.syscallsp {
throw("exitsyscall: syscall frame is no longer valid")
}
- _g_.waitsince = 0
- oldp := _g_.m.oldp.ptr()
- _g_.m.oldp = 0
+ gp.waitsince = 0
+ oldp := gp.m.oldp.ptr()
+ gp.m.oldp = 0
if exitsyscallfast(oldp) {
// When exitsyscallfast returns success, we have a P so can now use
// write barriers
@@ -3781,33 +3777,33 @@ func exitsyscall() {
// profile, exactly as it was when the goroutine profiler first
// stopped the world.
systemstack(func() {
- tryRecordGoroutineProfileWB(_g_)
+ tryRecordGoroutineProfileWB(gp)
})
}
if trace.enabled {
- if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
+ if oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick {
systemstack(traceGoStart)
}
}
// There's a cpu for us, so we can run.
- _g_.m.p.ptr().syscalltick++
+ gp.m.p.ptr().syscalltick++
// We need to cas the status and scan before resuming...
- casgstatus(_g_, _Gsyscall, _Grunning)
+ casgstatus(gp, _Gsyscall, _Grunning)
// Garbage collector isn't running (since we are),
// so okay to clear syscallsp.
- _g_.syscallsp = 0
- _g_.m.locks--
- if _g_.preempt {
+ gp.syscallsp = 0
+ gp.m.locks--
+ if gp.preempt {
// restore the preemption request in case we've cleared it in newstack
- _g_.stackguard0 = stackPreempt
+ gp.stackguard0 = stackPreempt
} else {
// otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock
- _g_.stackguard0 = _g_.stack.lo + _StackGuard
+ gp.stackguard0 = gp.stack.lo + _StackGuard
}
- _g_.throwsplit = false
+ gp.throwsplit = false
- if sched.disable.user && !schedEnabled(_g_) {
+ if sched.disable.user && !schedEnabled(gp) {
// Scheduling of this goroutine is disabled.
Gosched()
}
@@ -3815,21 +3811,21 @@ func exitsyscall() {
return
}
- _g_.sysexitticks = 0
+ gp.sysexitticks = 0
if trace.enabled {
// Wait till traceGoSysBlock event is emitted.
// This ensures consistency of the trace (the goroutine is started after it is blocked).
- for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
+ for oldp != nil && oldp.syscalltick == gp.m.syscalltick {
osyield()
}
// We can't trace syscall exit right now because we don't have a P.
// Tracing code can invoke write barriers that cannot run without a P.
// So instead we remember the syscall exit time and emit the event
// in execute when we have a P.
- _g_.sysexitticks = cputicks()
+ gp.sysexitticks = cputicks()
}
- _g_.m.locks--
+ gp.m.locks--
// Call the scheduler.
mcall(exitsyscall0)
@@ -3840,14 +3836,14 @@ func exitsyscall() {
// Must wait until now because until gosched returns
// we don't know for sure that the garbage collector
// is not running.
- _g_.syscallsp = 0
- _g_.m.p.ptr().syscalltick++
- _g_.throwsplit = false
+ gp.syscallsp = 0
+ gp.m.p.ptr().syscalltick++
+ gp.throwsplit = false
}
//go:nosplit
func exitsyscallfast(oldp *p) bool {
- _g_ := getg()
+ gp := getg()
// Freezetheworld sets stopwait but does not retake P's.
if sched.stopwait == freezeStopWait {
@@ -3871,7 +3867,7 @@ func exitsyscallfast(oldp *p) bool {
if oldp != nil {
// Wait till traceGoSysBlock event is emitted.
// This ensures consistency of the trace (the goroutine is started after it is blocked).
- for oldp.syscalltick == _g_.m.syscalltick {
+ for oldp.syscalltick == gp.m.syscalltick {
osyield()
}
}
@@ -3891,33 +3887,33 @@ func exitsyscallfast(oldp *p) bool {
//
//go:nosplit
func exitsyscallfast_reacquired() {
- _g_ := getg()
- if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
+ gp := getg()
+ if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
if trace.enabled {
- // The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed).
+ // The p was retaken and then enter into syscall again (since gp.m.syscalltick has changed).
// traceGoSysBlock for this syscall was already emitted,
// but here we effectively retake the p from the new syscall running on the same p.
systemstack(func() {
// Denote blocking of the new syscall.
- traceGoSysBlock(_g_.m.p.ptr())
+ traceGoSysBlock(gp.m.p.ptr())
// Denote completion of the current syscall.
traceGoSysExit(0)
})
}
- _g_.m.p.ptr().syscalltick++
+ gp.m.p.ptr().syscalltick++
}
}
func exitsyscallfast_pidle() bool {
lock(&sched.lock)
- _p_, _ := pidleget(0)
- if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
+ pp, _ := pidleget(0)
+ if pp != nil && atomic.Load(&sched.sysmonwait) != 0 {
atomic.Store(&sched.sysmonwait, 0)
notewakeup(&sched.sysmonnote)
}
unlock(&sched.lock)
- if _p_ != nil {
- acquirep(_p_)
+ if pp != nil {
+ acquirep(pp)
return true
}
return false
@@ -3933,12 +3929,12 @@ func exitsyscall0(gp *g) {
casgstatus(gp, _Gsyscall, _Grunnable)
dropg()
lock(&sched.lock)
- var _p_ *p
+ var pp *p
if schedEnabled(gp) {
- _p_, _ = pidleget(0)
+ pp, _ = pidleget(0)
}
var locked bool
- if _p_ == nil {
+ if pp == nil {
globrunqput(gp)
// Below, we stoplockedm if gp is locked. globrunqput releases
@@ -3952,8 +3948,8 @@ func exitsyscall0(gp *g) {
notewakeup(&sched.sysmonnote)
}
unlock(&sched.lock)
- if _p_ != nil {
- acquirep(_p_)
+ if pp != nil {
+ acquirep(pp)
execute(gp, false) // Never returns.
}
if locked {
@@ -4089,8 +4085,8 @@ func newproc(fn *funcval) {
systemstack(func() {
newg := newproc1(fn, gp, pc)
- _p_ := getg().m.p.ptr()
- runqput(_p_, newg, true)
+ pp := getg().m.p.ptr()
+ runqput(pp, newg, true)
if mainStarted {
wakep()
@@ -4102,15 +4098,13 @@ func newproc(fn *funcval) {
// address of the go statement that created this. The caller is responsible
// for adding the new g to the scheduler.
func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
- _g_ := getg()
-
if fn == nil {
fatal("go of nil func value")
}
- acquirem() // disable preemption because it can be holding p in a local var
- _p_ := _g_.m.p.ptr()
- newg := gfget(_p_)
+ mp := acquirem() // disable preemption because we hold M and P in local vars.
+ pp := mp.p.ptr()
+ newg := gfget(pp)
if newg == nil {
newg = malg(_StackMin)
casgstatus(newg, _Gidle, _Gdead)
@@ -4148,8 +4142,8 @@ func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
atomic.Xadd(&sched.ngsys, +1)
} else {
// Only user goroutines inherit pprof labels.
- if _g_.m.curg != nil {
- newg.labels = _g_.m.curg.labels
+ if mp.curg != nil {
+ newg.labels = mp.curg.labels
}
if goroutineProfile.active {
// A concurrent goroutine profile is running. It should include
@@ -4166,18 +4160,18 @@ func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
newg.tracking = true
}
casgstatus(newg, _Gdead, _Grunnable)
- gcController.addScannableStack(_p_, int64(newg.stack.hi-newg.stack.lo))
+ gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
- if _p_.goidcache == _p_.goidcacheend {
+ if pp.goidcache == pp.goidcacheend {
// Sched.goidgen is the last allocated id,
// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
// At startup sched.goidgen=0, so main goroutine receives goid=1.
- _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
- _p_.goidcache -= _GoidCacheBatch - 1
- _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
+ pp.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
+ pp.goidcache -= _GoidCacheBatch - 1
+ pp.goidcacheend = pp.goidcache + _GoidCacheBatch
}
- newg.goid = int64(_p_.goidcache)
- _p_.goidcache++
+ newg.goid = int64(pp.goidcache)
+ pp.goidcache++
if raceenabled {
newg.racectx = racegostart(callerpc)
if newg.labels != nil {
@@ -4189,7 +4183,7 @@ func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
if trace.enabled {
traceGoCreate(newg, newg.startpc)
}
- releasem(_g_.m)
+ releasem(mp)
return newg
}
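
The goidcache block earlier in newproc1 refills per-P goroutine IDs in batches, so most calls never touch the shared sched.goidgen counter. A detached sketch of that batching, assuming sync/atomic and a made-up batch size rather than _GoidCacheBatch:

	var idGen uint64 // shared counter, advanced atomically once per batch

	const idBatch = 16

	type idCache struct {
		next, end uint64 // half-open range [next, end) of usable IDs
	}

	// nextID hands out IDs from the local batch and only falls back to
	// the shared counter when the batch is exhausted.
	func (c *idCache) nextID() uint64 {
		if c.next == c.end {
			c.next = atomic.AddUint64(&idGen, idBatch) - idBatch + 1
			c.end = c.next + idBatch
		}
		id := c.next
		c.next++
		return id
	}
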
@@ -4230,7 +4224,7 @@ func saveAncestors(callergp *g) *[]ancestorInfo {
// Put on gfree list.
// If local list is too long, transfer a batch to the global list.
-func gfput(_p_ *p, gp *g) {
+func gfput(pp *p, gp *g) {
if readgstatus(gp) != _Gdead {
throw("gfput: bad status (not Gdead)")
}
@@ -4245,17 +4239,17 @@ func gfput(_p_ *p, gp *g) {
gp.stackguard0 = 0
}
- _p_.gFree.push(gp)
- _p_.gFree.n++
- if _p_.gFree.n >= 64 {
+ pp.gFree.push(gp)
+ pp.gFree.n++
+ if pp.gFree.n >= 64 {
var (
inc int32
stackQ gQueue
noStackQ gQueue
)
- for _p_.gFree.n >= 32 {
- gp = _p_.gFree.pop()
- _p_.gFree.n--
+ for pp.gFree.n >= 32 {
+ gp = pp.gFree.pop()
+ pp.gFree.n--
if gp.stack.lo == 0 {
noStackQ.push(gp)
} else {
@@ -4273,12 +4267,12 @@ func gfput(_p_ *p, gp *g) {
// Get from gfree list.
// If local list is empty, grab a batch from global list.
-func gfget(_p_ *p) *g {
+func gfget(pp *p) *g {
retry:
- if _p_.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
+ if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
lock(&sched.gFree.lock)
// Move a batch of free Gs to the P.
- for _p_.gFree.n < 32 {
+ for pp.gFree.n < 32 {
// Prefer Gs with stacks.
gp := sched.gFree.stack.pop()
if gp == nil {
@@ -4288,17 +4282,17 @@ retry:
}
}
sched.gFree.n--
- _p_.gFree.push(gp)
- _p_.gFree.n++
+ pp.gFree.push(gp)
+ pp.gFree.n++
}
unlock(&sched.gFree.lock)
goto retry
}
- gp := _p_.gFree.pop()
+ gp := pp.gFree.pop()
if gp == nil {
return nil
}
- _p_.gFree.n--
+ pp.gFree.n--
if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
// Deallocate old stack. We kept it in gfput because it was the
// right size when the goroutine was put on the free list, but
@@ -4331,15 +4325,15 @@ retry:
}
// Purge all cached G's from gfree list to the global list.
-func gfpurge(_p_ *p) {
+func gfpurge(pp *p) {
var (
inc int32
stackQ gQueue
noStackQ gQueue
)
- for !_p_.gFree.empty() {
- gp := _p_.gFree.pop()
- _p_.gFree.n--
+ for !pp.gFree.empty() {
+ gp := pp.gFree.pop()
+ pp.gFree.n--
if gp.stack.lo == 0 {
noStackQ.push(gp)
} else {
@@ -4368,9 +4362,9 @@ func dolockOSThread() {
if GOARCH == "wasm" {
return // no threads on wasm yet
}
- _g_ := getg()
- _g_.m.lockedg.set(_g_)
- _g_.lockedm.set(_g_.m)
+ gp := getg()
+ gp.m.lockedg.set(gp)
+ gp.lockedm.set(gp.m)
}
//go:nosplit
@@ -4396,10 +4390,10 @@ func LockOSThread() {
// while we're in a known-good state.
startTemplateThread()
}
- _g_ := getg()
- _g_.m.lockedExt++
- if _g_.m.lockedExt == 0 {
- _g_.m.lockedExt--
+ gp := getg()
+ gp.m.lockedExt++
+ if gp.m.lockedExt == 0 {
+ gp.m.lockedExt--
panic("LockOSThread nesting overflow")
}
dolockOSThread()
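
The lockedExt bookkeeping above is a nesting counter with a wraparound guard: the increment is undone before panicking, so the counter stays consistent if the panic is recovered. The same guard in isolation, with an invented type:

	type nesting struct {
		depth uint32
	}

	// enter bumps the nesting depth; on uint32 wraparound it restores the
	// previous value before panicking so the state remains usable.
	func (n *nesting) enter() {
		n.depth++
		if n.depth == 0 {
			n.depth--
			panic("nesting overflow")
		}
	}

	// exit mirrors UnlockOSThread: a stray call with zero depth is a no-op.
	func (n *nesting) exit() {
		if n.depth == 0 {
			return
		}
		n.depth--
	}
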
@@ -4420,12 +4414,12 @@ func dounlockOSThread() {
if GOARCH == "wasm" {
return // no threads on wasm yet
}
- _g_ := getg()
- if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
+ gp := getg()
+ if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
return
}
- _g_.m.lockedg = 0
- _g_.lockedm = 0
+ gp.m.lockedg = 0
+ gp.lockedm = 0
}
//go:nosplit
@@ -4443,21 +4437,21 @@ func dounlockOSThread() {
// the goroutine locked to the OS thread until the goroutine (and
// hence the thread) exits.
func UnlockOSThread() {
- _g_ := getg()
- if _g_.m.lockedExt == 0 {
+ gp := getg()
+ if gp.m.lockedExt == 0 {
return
}
- _g_.m.lockedExt--
+ gp.m.lockedExt--
dounlockOSThread()
}
//go:nosplit
func unlockOSThread() {
- _g_ := getg()
- if _g_.m.lockedInt == 0 {
+ gp := getg()
+ if gp.m.lockedInt == 0 {
systemstack(badunlockosthread)
}
- _g_.m.lockedInt--
+ gp.m.lockedInt--
dounlockOSThread()
}
@@ -4467,8 +4461,8 @@ func badunlockosthread() {
func gcount() int32 {
n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
- for _, _p_ := range allp {
- n -= _p_.gFree.n
+ for _, pp := range allp {
+ n -= pp.gFree.n
}
// All these variables can be changed concurrently, so the result can be inconsistent.
@@ -4630,8 +4624,8 @@ func setcpuprofilerate(hz int32) {
// Disable preemption, otherwise we can be rescheduled to another thread
// that has profiling enabled.
- _g_ := getg()
- _g_.m.locks++
+ gp := getg()
+ gp.m.locks++
// Stop profiler on this thread so that it is safe to lock prof.
// if a profiling signal came in while we had prof locked,
@@ -4655,7 +4649,7 @@ func setcpuprofilerate(hz int32) {
setThreadCPUProfiler(hz)
}
- _g_.m.locks--
+ gp.m.locks--
}
// init initializes pp, which may be a freshly allocated p or a
@@ -4852,32 +4846,32 @@ func procresize(nprocs int32) *p {
atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
}
- _g_ := getg()
- if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
+ gp := getg()
+ if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
// continue to use the current P
- _g_.m.p.ptr().status = _Prunning
- _g_.m.p.ptr().mcache.prepareForSweep()
+ gp.m.p.ptr().status = _Prunning
+ gp.m.p.ptr().mcache.prepareForSweep()
} else {
// release the current P and acquire allp[0].
//
// We must do this before destroying our current P
// because p.destroy itself has write barriers, so we
// need to do that from a valid P.
- if _g_.m.p != 0 {
+ if gp.m.p != 0 {
if trace.enabled {
// Pretend that we were descheduled
// and then scheduled again to keep
// the trace sane.
traceGoSched()
- traceProcStop(_g_.m.p.ptr())
+ traceProcStop(gp.m.p.ptr())
}
- _g_.m.p.ptr().m = 0
+ gp.m.p.ptr().m = 0
}
- _g_.m.p = 0
- p := allp[0]
- p.m = 0
- p.status = _Pidle
- acquirep(p)
+ gp.m.p = 0
+ pp := allp[0]
+ pp.m = 0
+ pp.status = _Pidle
+ acquirep(pp)
if trace.enabled {
traceGoStart()
}
@@ -4888,8 +4882,8 @@ func procresize(nprocs int32) *p {
// release resources from unused P's
for i := nprocs; i < old; i++ {
- p := allp[i]
- p.destroy()
+ pp := allp[i]
+ pp.destroy()
// can't free P itself because it can be referenced by an M in syscall
}
@@ -4904,17 +4898,17 @@ func procresize(nprocs int32) *p {
var runnablePs *p
for i := nprocs - 1; i >= 0; i-- {
- p := allp[i]
- if _g_.m.p.ptr() == p {
+ pp := allp[i]
+ if gp.m.p.ptr() == pp {
continue
}
- p.status = _Pidle
- if runqempty(p) {
- pidleput(p, now)
+ pp.status = _Pidle
+ if runqempty(pp) {
+ pidleput(pp, now)
} else {
- p.m.set(mget())
- p.link.set(runnablePs)
- runnablePs = p
+ pp.m.set(mget())
+ pp.link.set(runnablePs)
+ runnablePs = pp
}
}
stealOrder.reset(uint32(nprocs))
@@ -4930,18 +4924,18 @@ func procresize(nprocs int32) *p {
// Associate p and the current m.
//
// This function is allowed to have write barriers even if the caller
-// isn't because it immediately acquires _p_.
+// isn't because it immediately acquires pp.
//
//go:yeswritebarrierrec
-func acquirep(_p_ *p) {
+func acquirep(pp *p) {
// Do the part that isn't allowed to have write barriers.
- wirep(_p_)
+ wirep(pp)
// Have p; write barriers now allowed.
// Perform deferred mcache flush before this P can allocate
// from a potentially stale mcache.
- _p_.mcache.prepareForSweep()
+ pp.mcache.prepareForSweep()
if trace.enabled {
traceProcStart()
@@ -4949,49 +4943,49 @@ func acquirep(_p_ *p) {
}
// wirep is the first step of acquirep, which actually associates the
-// current M to _p_. This is broken out so we can disallow write
+// current M to pp. This is broken out so we can disallow write
// barriers for this part, since we don't yet have a P.
//
//go:nowritebarrierrec
//go:nosplit
-func wirep(_p_ *p) {
- _g_ := getg()
+func wirep(pp *p) {
+ gp := getg()
- if _g_.m.p != 0 {
+ if gp.m.p != 0 {
throw("wirep: already in go")
}
- if _p_.m != 0 || _p_.status != _Pidle {
+ if pp.m != 0 || pp.status != _Pidle {
id := int64(0)
- if _p_.m != 0 {
- id = _p_.m.ptr().id
+ if pp.m != 0 {
+ id = pp.m.ptr().id
}
- print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
+ print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
throw("wirep: invalid p state")
}
- _g_.m.p.set(_p_)
- _p_.m.set(_g_.m)
- _p_.status = _Prunning
+ gp.m.p.set(pp)
+ pp.m.set(gp.m)
+ pp.status = _Prunning
}
// Disassociate p and the current m.
func releasep() *p {
- _g_ := getg()
+ gp := getg()
- if _g_.m.p == 0 {
+ if gp.m.p == 0 {
throw("releasep: invalid arg")
}
- _p_ := _g_.m.p.ptr()
- if _p_.m.ptr() != _g_.m || _p_.status != _Prunning {
- print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " p->status=", _p_.status, "\n")
+ pp := gp.m.p.ptr()
+ if pp.m.ptr() != gp.m || pp.status != _Prunning {
+ print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
throw("releasep: invalid p state")
}
if trace.enabled {
- traceProcStop(_g_.m.p.ptr())
+ traceProcStop(gp.m.p.ptr())
}
- _g_.m.p = 0
- _p_.m = 0
- _p_.status = _Pidle
- return _p_
+ gp.m.p = 0
+ pp.m = 0
+ pp.status = _Pidle
+ return pp
}
func incidlelocked(v int32) {
@@ -5099,8 +5093,8 @@ func checkdead() {
}
// There are no goroutines running, so we can look at the P's.
- for _, _p_ := range allp {
- if len(_p_.timers) > 0 {
+ for _, pp := range allp {
+ if len(pp.timers) > 0 {
return
}
}
@@ -5289,23 +5283,23 @@ func retake(now int64) uint32 {
// temporarily drop the allpLock. Hence, we need to re-fetch
// allp each time around the loop.
for i := 0; i < len(allp); i++ {
- _p_ := allp[i]
- if _p_ == nil {
+ pp := allp[i]
+ if pp == nil {
// This can happen if procresize has grown
// allp but not yet created new Ps.
continue
}
- pd := &_p_.sysmontick
- s := _p_.status
+ pd := &pp.sysmontick
+ s := pp.status
sysretake := false
if s == _Prunning || s == _Psyscall {
// Preempt G if it's running for too long.
- t := int64(_p_.schedtick)
+ t := int64(pp.schedtick)
if int64(pd.schedtick) != t {
pd.schedtick = uint32(t)
pd.schedwhen = now
} else if pd.schedwhen+forcePreemptNS <= now {
- preemptone(_p_)
+ preemptone(pp)
// In case of syscall, preemptone() doesn't
// work, because there is no M wired to P.
sysretake = true
@@ -5313,7 +5307,7 @@ func retake(now int64) uint32 {
}
if s == _Psyscall {
// Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
- t := int64(_p_.syscalltick)
+ t := int64(pp.syscalltick)
if !sysretake && int64(pd.syscalltick) != t {
pd.syscalltick = uint32(t)
pd.syscallwhen = now
@@ -5322,7 +5316,7 @@ func retake(now int64) uint32 {
// On the one hand we don't want to retake Ps if there is no other work to do,
// but on the other hand we want to retake them eventually
// because they can prevent the sysmon thread from deep sleep.
- if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
+ if runqempty(pp) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
continue
}
// Drop allpLock so we can take sched.lock.
@@ -5332,14 +5326,14 @@ func retake(now int64) uint32 {
// Otherwise the M from which we retake can exit the syscall,
// increment nmidle and report deadlock.
incidlelocked(-1)
- if atomic.Cas(&_p_.status, s, _Pidle) {
+ if atomic.Cas(&pp.status, s, _Pidle) {
if trace.enabled {
- traceGoSysBlock(_p_)
- traceProcStop(_p_)
+ traceGoSysBlock(pp)
+ traceProcStop(pp)
}
n++
- _p_.syscalltick++
- handoffp(_p_)
+ pp.syscalltick++
+ handoffp(pp)
}
incidlelocked(1)
lock(&allpLock)
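
retake's tests above are a tick-snapshot watchdog: sysmon records a P's schedtick or syscalltick together with the time it first saw that value, and only preempts or retakes once the tick has stayed unchanged past a threshold. A detached sketch of that check, with invented names (not the runtime's sysmontick fields):

	type tickWatch struct {
		lastTick uint32
		lastSeen int64 // when lastTick was first observed, in ns
	}

	// stuck reports whether tick has stayed unchanged for at least limit
	// nanoseconds; a changed tick simply resets the observation window.
	func (w *tickWatch) stuck(tick uint32, now, limit int64) bool {
		if w.lastTick != tick {
			w.lastTick = tick
			w.lastSeen = now
			return false
		}
		return w.lastSeen+limit <= now
	}
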
@@ -5356,11 +5350,11 @@ func retake(now int64) uint32 {
// Returns true if preemption request was issued to at least one goroutine.
func preemptall() bool {
res := false
- for _, _p_ := range allp {
- if _p_.status != _Prunning {
+ for _, pp := range allp {
+ if pp.status != _Prunning {
continue
}
- if preemptone(_p_) {
+ if preemptone(pp) {
res = true
}
}
@@ -5377,8 +5371,8 @@ func preemptall() bool {
// The actual preemption will happen at some point in the future
// and will be indicated by the gp->status no longer being
// Grunning
-func preemptone(_p_ *p) bool {
- mp := _p_.m.ptr()
+func preemptone(pp *p) bool {
+ mp := pp.m.ptr()
if mp == nil || mp == getg().m {
return false
}
@@ -5397,7 +5391,7 @@ func preemptone(_p_ *p) bool {
// Request an async preemption of this P.
if preemptMSupported && debug.asyncpreemptoff == 0 {
- _p_.preempt = true
+ pp.preempt = true
preemptM(mp)
}
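A hedged, user-level sketch of the "request now, preempt later" shape described above; preemptRequested and doWork are illustrative names, not runtime APIs, and the real runtime additionally delivers a signal for async preemption.

package main

import (
    "fmt"
    "sync/atomic"
)

var preemptRequested atomic.Bool

// requestPreempt plays the role of preemptone: it cannot stop the target
// immediately, it only records the request; the preemption happens "at some
// point in the future" when the target reaches a safe point and checks the flag.
func requestPreempt() { preemptRequested.Store(true) }

// doWork checks the flag at loop boundaries (its "safe points") and yields
// early when a preemption was requested.
func doWork(iters int) int {
    done := 0
    for i := 0; i < iters; i++ {
        if preemptRequested.CompareAndSwap(true, false) {
            break // honor the request at a safe point
        }
        done++
    }
    return done
}

func main() {
    requestPreempt()
    fmt.Println(doWork(1000)) // 0: the request was already pending
}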
@@ -5420,16 +5414,16 @@ func schedtrace(detailed bool) {
// We must be careful while reading data from P's, M's and G's.
// Even if we hold schedlock, most data can be changed concurrently.
// E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
- for i, _p_ := range allp {
- mp := _p_.m.ptr()
- h := atomic.Load(&_p_.runqhead)
- t := atomic.Load(&_p_.runqtail)
+ for i, pp := range allp {
+ mp := pp.m.ptr()
+ h := atomic.Load(&pp.runqhead)
+ t := atomic.Load(&pp.runqtail)
if detailed {
id := int64(-1)
if mp != nil {
id = mp.id
}
- print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gFree.n, " timerslen=", len(_p_.timers), "\n")
+ print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers), "\n")
} else {
// In non-detailed mode format lengths of per-P run queues as:
// [len1 len2 len3 len4]
@@ -5450,12 +5444,12 @@ func schedtrace(detailed bool) {
}
for mp := allm; mp != nil; mp = mp.alllink {
- _p_ := mp.p.ptr()
+ pp := mp.p.ptr()
gp := mp.curg
lockedg := mp.lockedg.ptr()
id1 := int32(-1)
- if _p_ != nil {
- id1 = _p_.id
+ if pp != nil {
+ id1 = pp.id
}
id2 := int64(-1)
if gp != nil {
@@ -5592,7 +5586,7 @@ func globrunqputbatch(batch *gQueue, n int32) {
// Try to get a batch of G's from the global runnable queue.

// sched.lock must be held.
-func globrunqget(_p_ *p, max int32) *g {
+func globrunqget(pp *p, max int32) *g {
assertLockHeld(&sched.lock)
if sched.runqsize == 0 {
@@ -5606,8 +5600,8 @@ func globrunqget(_p_ *p, max int32) *g {
if max > 0 && n > max {
n = max
}
- if n > int32(len(_p_.runq))/2 {
- n = int32(len(_p_.runq)) / 2
+ if n > int32(len(pp.runq))/2 {
+ n = int32(len(pp.runq)) / 2
}
sched.runqsize -= n
@@ -5616,7 +5610,7 @@ func globrunqget(_p_ *p, max int32) *g {
n--
for ; n > 0; n-- {
gp1 := sched.runq.pop()
- runqput(_p_, gp1, false)
+ runqput(pp, gp1, false)
}
return gp
}
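The sizing arithmetic in globrunqget, restated as a small stand-alone sketch (globBatchSize and its parameters are assumed names, not the runtime's): a proportional share of the global queue (runqsize/gomaxprocs + 1) is clamped at the whole queue, at the caller's max when positive, and at half the local run queue.

package sketch

// globBatchSize mirrors the clamping in globrunqget so that one P cannot
// hoard global work: share, then cap at runqsize, max, and half the local runq.
func globBatchSize(runqsize, gomaxprocs, max, localRunqLen int32) int32 {
    n := runqsize/gomaxprocs + 1
    if n > runqsize {
        n = runqsize
    }
    if max > 0 && n > max {
        n = max
    }
    if n > localRunqLen/2 {
        n = localRunqLen / 2
    }
    return n
}

For example, globBatchSize(64, 8, 0, 256) is 9: 64/8+1, comfortably under both caps.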
@@ -5696,21 +5690,21 @@ func updateTimerPMask(pp *p) {
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrierrec
-func pidleput(_p_ *p, now int64) int64 {
+func pidleput(pp *p, now int64) int64 {
assertLockHeld(&sched.lock)
- if !runqempty(_p_) {
+ if !runqempty(pp) {
throw("pidleput: P has non-empty run queue")
}
if now == 0 {
now = nanotime()
}
- updateTimerPMask(_p_) // clear if there are no timers.
- idlepMask.set(_p_.id)
- _p_.link = sched.pidle
- sched.pidle.set(_p_)
+ updateTimerPMask(pp) // clear if there are no timers.
+ idlepMask.set(pp.id)
+ pp.link = sched.pidle
+ sched.pidle.set(pp)
atomic.Xadd(&sched.npidle, 1)
- if !_p_.limiterEvent.start(limiterEventIdle, now) {
+ if !pp.limiterEvent.start(limiterEventIdle, now) {
throw("must be able to track idle limiter event")
}
return now
@@ -5726,33 +5720,33 @@ func pidleput(_p_ *p, now int64) int64 {
func pidleget(now int64) (*p, int64) {
assertLockHeld(&sched.lock)
- _p_ := sched.pidle.ptr()
- if _p_ != nil {
+ pp := sched.pidle.ptr()
+ if pp != nil {
// Timer may get added at any time now.
if now == 0 {
now = nanotime()
}
- timerpMask.set(_p_.id)
- idlepMask.clear(_p_.id)
- sched.pidle = _p_.link
+ timerpMask.set(pp.id)
+ idlepMask.clear(pp.id)
+ sched.pidle = pp.link
atomic.Xadd(&sched.npidle, -1)
- _p_.limiterEvent.stop(limiterEventIdle, now)
+ pp.limiterEvent.stop(limiterEventIdle, now)
}
- return _p_, now
+ return pp, now
}
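A simplified sketch of the idle-P list shape used by pidleput and pidleget: an intrusive LIFO threaded through a link field plus a counter, standing in for sched.pidle and sched.npidle. The timer/idle bitmask updates and the limiter event are omitted, and the types are illustrative.

package sketch

// idleP and idleList are illustrative, lock-protected analogues of p/p.link
// and sched.pidle/sched.npidle; the caller is assumed to hold the scheduler lock.
type idleP struct {
    id   int32
    link *idleP // intrusive link, like p.link
}

type idleList struct {
    head  *idleP
    count int
}

// put pushes an idle worker, like pidleput pushing onto sched.pidle.
func (l *idleList) put(p *idleP) {
    p.link = l.head
    l.head = p
    l.count++
}

// get pops the most recently idled worker, like pidleget.
func (l *idleList) get() *idleP {
    p := l.head
    if p != nil {
        l.head = p.link
        l.count--
    }
    return p
}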
-// runqempty reports whether _p_ has no Gs on its local run queue.
+// runqempty reports whether pp has no Gs on its local run queue.
// It never returns true spuriously.
-func runqempty(_p_ *p) bool {
- // Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail,
- // 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext.
+func runqempty(pp *p) bool {
+ // Defend against a race where 1) pp has G1 in runnext but runqhead == runqtail,
+ // 2) runqput on pp kicks G1 to the runq, 3) runqget on pp empties runnext.
// Simply observing that runqhead == runqtail and then observing that runqnext == nil
// does not mean the queue is empty.
for {
- head := atomic.Load(&_p_.runqhead)
- tail := atomic.Load(&_p_.runqtail)
- runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
- if tail == atomic.Load(&_p_.runqtail) {
+ head := atomic.Load(&pp.runqhead)
+ tail := atomic.Load(&pp.runqtail)
+ runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
+ if tail == atomic.Load(&pp.runqtail) {
return head == tail && runnext == 0
}
}
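A sketch of the snapshot trick runqempty describes, on a simplified queue with Go's typed atomics standing in for the runtime's helpers: re-reading the tail and requiring it to be unchanged makes the head/tail/runnext trio safe to judge together.

package sketch

import "sync/atomic"

// miniQ is an illustrative stand-in for the per-P run queue fields.
type miniQ struct {
    head    atomic.Uint32
    tail    atomic.Uint32
    runnext atomic.Uintptr
}

// empty mirrors runqempty: loop until tail is stable across the reads,
// then the three values form a consistent snapshot.
func (q *miniQ) empty() bool {
    for {
        h := q.head.Load()
        t := q.tail.Load()
        next := q.runnext.Load()
        if t == q.tail.Load() { // tail unchanged: snapshot is consistent
            return h == t && next == 0
        }
        // tail moved under us; retry.
    }
}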
@@ -5771,18 +5765,18 @@ const randomizeScheduler = raceenabled
// runqput tries to put g on the local runnable queue.
// If next is false, runqput adds g to the tail of the runnable queue.
-// If next is true, runqput puts g in the _p_.runnext slot.
+// If next is true, runqput puts g in the pp.runnext slot.
// If the run queue is full, runqput puts g on the global queue.
// Executed only by the owner P.
-func runqput(_p_ *p, gp *g, next bool) {
+func runqput(pp *p, gp *g, next bool) {
if randomizeScheduler && next && fastrandn(2) == 0 {
next = false
}
if next {
retryNext:
- oldnext := _p_.runnext
- if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
+ oldnext := pp.runnext
+ if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
goto retryNext
}
if oldnext == 0 {
@@ -5793,14 +5787,14 @@ func runqput(_p_ *p, gp *g, next bool) {
}
retry:
- h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers
- t := _p_.runqtail
- if t-h < uint32(len(_p_.runq)) {
- _p_.runq[t%uint32(len(_p_.runq))].set(gp)
- atomic.StoreRel(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
+ h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with consumers
+ t := pp.runqtail
+ if t-h < uint32(len(pp.runq)) {
+ pp.runq[t%uint32(len(pp.runq))].set(gp)
+ atomic.StoreRel(&pp.runqtail, t+1) // store-release, makes the item available for consumption
return
}
- if runqputslow(_p_, gp, h, t) {
+ if runqputslow(pp, gp, h, t) {
return
}
// the queue is not full, now the put above must succeed
@@ -5809,19 +5803,19 @@ retry:
// Put g and a batch of work from local runnable queue on global queue.
// Executed only by the owner P.
-func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
- var batch [len(_p_.runq)/2 + 1]*g
+func runqputslow(pp *p, gp *g, h, t uint32) bool {
+ var batch [len(pp.runq)/2 + 1]*g
// First, grab a batch from local queue.
n := t - h
n = n / 2
- if n != uint32(len(_p_.runq)/2) {
+ if n != uint32(len(pp.runq)/2) {
throw("runqputslow: queue is not full")
}
for i := uint32(0); i < n; i++ {
- batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
+ batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
}
- if !atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume
+ if !atomic.CasRel(&pp.runqhead, h, h+n) { // cas-release, commits consume
return false
}
batch[n] = gp
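A condensed sketch of the put path under stated assumptions: a small fixed-size ring owned by a single producer, a plain slice standing in for the global queue, and Go's typed atomics in place of LoadAcq/StoreRel/CasRel. The spill takes half of the local queue plus the incoming item, the same shape runqputslow has.

package sketch

import "sync/atomic"

const ringSize = 8 // the runtime uses 256; kept small for illustration

type spsc struct {
    head atomic.Uint32 // consumed by stealers
    tail atomic.Uint32 // written only by the owner
    ring [ringSize]int // payload; the runtime stores guintptrs
}

// put tries the local ring first and spills to overflow when full,
// mirroring runqput's fast path plus runqputslow.
func (q *spsc) put(g int, overflow *[]int) {
    for {
        h := q.head.Load()
        t := q.tail.Load()
        if t-h < ringSize {
            q.ring[t%ringSize] = g
            q.tail.Store(t + 1) // publish the item to consumers
            return
        }
        if q.putSlow(g, h, t, overflow) {
            return
        }
        // a stealer drained some items between our reads; the fast path will now succeed
    }
}

// putSlow moves half of the ring plus g to the overflow list.
func (q *spsc) putSlow(g int, h, t uint32, overflow *[]int) bool {
    n := (t - h) / 2
    if n != ringSize/2 {
        return false // inconsistent read; caller retries
    }
    batch := make([]int, 0, n+1)
    for i := uint32(0); i < n; i++ {
        batch = append(batch, q.ring[(h+i)%ringSize])
    }
    if !q.head.CompareAndSwap(h, h+n) {
        return false // lost a race with a consumer; caller retries
    }
    *overflow = append(*overflow, append(batch, g)...)
    return true
}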
@@ -5886,50 +5880,50 @@ func runqputbatch(pp *p, q *gQueue, qsize int) {
// If inheritTime is true, gp should inherit the remaining time in the
// current time slice. Otherwise, it should start a new time slice.
// Executed only by the owner P.
-func runqget(_p_ *p) (gp *g, inheritTime bool) {
+func runqget(pp *p) (gp *g, inheritTime bool) {
// If there's a runnext, it's the next G to run.
- next := _p_.runnext
+ next := pp.runnext
// If the runnext is non-0 and the CAS fails, it could only have been stolen by another P,
// because other Ps can race to set runnext to 0, but only the current P can set it to non-0.
// Hence, there's no need to retry this CAS if it fails.
- if next != 0 && _p_.runnext.cas(next, 0) {
+ if next != 0 && pp.runnext.cas(next, 0) {
return next.ptr(), true
}
for {
- h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers
- t := _p_.runqtail
+ h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
+ t := pp.runqtail
if t == h {
return nil, false
}
- gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
- if atomic.CasRel(&_p_.runqhead, h, h+1) { // cas-release, commits consume
+ gp := pp.runq[h%uint32(len(pp.runq))].ptr()
+ if atomic.CasRel(&pp.runqhead, h, h+1) { // cas-release, commits consume
return gp, false
}
}
}
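The matching consume side, as a sketch with the same simplifications (typed atomics, integer payload, a one-element runnext slot): prefer runnext, which inherits the time slice, otherwise CAS the head forward to claim the front of the ring.

package sketch

import "sync/atomic"

type popQ struct {
    head    atomic.Uint32
    tail    atomic.Uint32
    runnext atomic.Int64 // 0 means empty; the runtime stores a guintptr here
    ring    [8]int64
}

// get mirrors runqget: try runnext once (no retry is needed if the CAS loses,
// since only a consumer can have cleared it), then pop from the ring.
func (q *popQ) get() (g int64, inheritTime, ok bool) {
    if next := q.runnext.Load(); next != 0 && q.runnext.CompareAndSwap(next, 0) {
        return next, true, true
    }
    for {
        h := q.head.Load()
        t := q.tail.Load()
        if t == h {
            return 0, false, false // empty
        }
        front := q.ring[h%uint32(len(q.ring))]
        if q.head.CompareAndSwap(h, h+1) { // claim the slot we just read
            return front, false, true
        }
        // lost a race with another consumer; retry
    }
}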
-// runqdrain drains the local runnable queue of _p_ and returns all goroutines in it.
+// runqdrain drains the local runnable queue of pp and returns all goroutines in it.
// Executed only by the owner P.
-func runqdrain(_p_ *p) (drainQ gQueue, n uint32) {
- oldNext := _p_.runnext
- if oldNext != 0 && _p_.runnext.cas(oldNext, 0) {
+func runqdrain(pp *p) (drainQ gQueue, n uint32) {
+ oldNext := pp.runnext
+ if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
drainQ.pushBack(oldNext.ptr())
n++
}
retry:
- h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers
- t := _p_.runqtail
+ h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
+ t := pp.runqtail
qn := t - h
if qn == 0 {
return
}
- if qn > uint32(len(_p_.runq)) { // read inconsistent h and t
+ if qn > uint32(len(pp.runq)) { // read inconsistent h and t
goto retry
}
- if !atomic.CasRel(&_p_.runqhead, h, h+qn) { // cas-release, commits consume
+ if !atomic.CasRel(&pp.runqhead, h, h+qn) { // cas-release, commits consume
goto retry
}
@@ -5941,34 +5935,34 @@ retry:
// meanwhile, other P's can't access the G's in the local P's runnable queue and steal them.
// See https://groups.google.com/g/golang-dev/c/0pTKxEKhHSc/m/6Q85QjdVBQAJ for more details.
for i := uint32(0); i < qn; i++ {
- gp := _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
+ gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
drainQ.pushBack(gp)
n++
}
return
}
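A sketch of the claim-first order runqdrain uses (runnext handling omitted): one CAS moves the head past the whole span, which hides it from stealers, and only then are the items copied out. This is safe because only the owner can recycle those slots by advancing the tail, and the owner is the one draining.

package sketch

import "sync/atomic"

type drainQ struct {
    head atomic.Uint32
    tail atomic.Uint32
    ring [8]int
}

// drain claims the span [h, t) with a single CAS on head, then copies it out.
func (q *drainQ) drain() []int {
    for {
        h := q.head.Load()
        t := q.tail.Load()
        n := t - h
        if n == 0 {
            return nil
        }
        if n > uint32(len(q.ring)) {
            continue // inconsistent h/t read; retry
        }
        if !q.head.CompareAndSwap(h, h+n) {
            continue // a stealer consumed something; retry
        }
        out := make([]int, 0, n)
        for i := uint32(0); i < n; i++ {
            out = append(out, q.ring[(h+i)%uint32(len(q.ring))])
        }
        return out
    }
}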
-// Grabs a batch of goroutines from _p_'s runnable queue into batch.
+// Grabs a batch of goroutines from pp's runnable queue into batch.
// Batch is a ring buffer starting at batchHead.
// Returns number of grabbed goroutines.
// Can be executed by any P.
-func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
+func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
for {
- h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers
- t := atomic.LoadAcq(&_p_.runqtail) // load-acquire, synchronize with the producer
+ h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
+ t := atomic.LoadAcq(&pp.runqtail) // load-acquire, synchronize with the producer
n := t - h
n = n - n/2
if n == 0 {
if stealRunNextG {
- // Try to steal from _p_.runnext.
- if next := _p_.runnext; next != 0 {
- if _p_.status == _Prunning {
- // Sleep to ensure that _p_ isn't about to run the g
+ // Try to steal from pp.runnext.
+ if next := pp.runnext; next != 0 {
+ if pp.status == _Prunning {
+ // Sleep to ensure that pp isn't about to run the g
// we are about to steal.
// The important use case here is when the g running
- // on _p_ ready()s another g and then almost
+ // on pp ready()s another g and then almost
// immediately blocks. Instead of stealing runnext
- // in this window, back off to give _p_ a chance to
+ // in this window, back off to give pp a chance to
// schedule runnext. This will avoid thrashing gs
// between different Ps.
// A sync chan send/recv takes ~50ns as of time of
@@ -5982,7 +5976,7 @@ func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool
osyield()
}
}
- if !_p_.runnext.cas(next, 0) {
+ if !pp.runnext.cas(next, 0) {
continue
}
batch[batchHead%uint32(len(batch))] = next
@@ -5991,14 +5985,14 @@ func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool
}
return 0
}
- if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t
+ if n > uint32(len(pp.runq)/2) { // read inconsistent h and t
continue
}
for i := uint32(0); i < n; i++ {
- g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
+ g := pp.runq[(h+i)%uint32(len(pp.runq))]
batch[(batchHead+i)%uint32(len(batch))] = g
}
- if atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume
+ if atomic.CasRel(&pp.runqhead, h, h+n) { // cas-release, commits consume
return n
}
}
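Two details of runqgrab pulled out as a sketch: the steal size rounds up, so a victim with a single G still loses it, and the thief backs off briefly before taking a running victim's runnext. The constant and function names here are illustrative, not the runtime's tuning.

package sketch

import "time"

// stealCount mirrors the sizing in runqgrab: take the half that rounds up.
func stealCount(head, tail uint32) uint32 {
    n := tail - head
    return n - n/2
}

// runnextBackoff is an illustrative pause before stealing a running victim's
// runnext: long enough (a few channel-operation latencies) for the victim to
// schedule runnext itself, which avoids bouncing a communicating pair of
// goroutines between Ps.
const runnextBackoff = 3 * time.Microsecond

// shouldWaitBeforeStealingRunnext returns how long the thief should back off.
func shouldWaitBeforeStealingRunnext(victimRunning bool) time.Duration {
    if victimRunning {
        return runnextBackoff
    }
    return 0
}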
@@ -6007,22 +6001,22 @@ func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool
// Steal half of the elements from the local runnable queue of p2
// and put them onto the local runnable queue of p.
// Returns one of the stolen elements (or nil if the steal failed).
-func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
- t := _p_.runqtail
- n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
+func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
+ t := pp.runqtail
+ n := runqgrab(p2, &pp.runq, t, stealRunNextG)
if n == 0 {
return nil
}
n--
- gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
+ gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
if n == 0 {
return gp
}
- h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers
- if t-h+n >= uint32(len(_p_.runq)) {
+ h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with consumers
+ if t-h+n >= uint32(len(pp.runq)) {
throw("runqsteal: runq overflow")
}
- atomic.StoreRel(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
+ atomic.StoreRel(&pp.runqtail, t+n) // store-release, makes the item available for consumption
return gp
}
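A small sketch of the hand-back at the end of runqsteal: of the batch grabbed into the thief's ring, the last goroutine is returned to run right away and only the rest are published by moving the tail. splitStolen is an illustrative name.

package sketch

// splitStolen separates the goroutine to run immediately from the ones to publish.
func splitStolen(batch []int) (runNow int, publish []int, ok bool) {
    if len(batch) == 0 {
        return 0, nil, false
    }
    return batch[len(batch)-1], batch[:len(batch)-1], true
}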
@@ -6143,8 +6137,8 @@ func setMaxThreads(in int) (out int) {
//go:nosplit
func procPin() int {
- _g_ := getg()
- mp := _g_.m
+ gp := getg()
+ mp := gp.m
mp.locks++
return int(mp.p.ptr().id)
@@ -6152,8 +6146,8 @@ func procPin() int {
//go:nosplit
func procUnpin() {
- _g_ := getg()
- _g_.m.locks--
+ gp := getg()
+ gp.m.locks--
}
//go:linkname sync_runtime_procPin sync.runtime_procPin