author    Ian Lance Taylor <iant@golang.org>  2020-09-29 17:01:33 -0700
committer Ian Lance Taylor <iant@golang.org>  2020-10-27 22:57:36 +0000
commit    b4b014465216790e01aa66f9120d03230e4aff46 (patch)
tree      627d7b67ed3f2b1827d93bf7ef0d4338cf359120
parent    091257def92b0280b07bde9536b7cdf5f3b02aec (diff)
runtime: don't always adjust timers
Some programs have a lot of timers that they adjust both forward and
backward in time. This can cause a large number of timerModifiedEarlier
timers. In practice these timers are used for I/O deadlines and are
rarely reached. The effect is that the runtime spends a lot of time in
adjusttimers making sure that there are no timerModifiedEarlier timers,
but the effort is wasted because none of the adjusted timers are near
the top of the timer heap anyhow.

Avoid much of this extra work by keeping track of the earliest known
timerModifiedEarlier timer. This lets us skip adjusttimers if we know
that none of the timers will be ready to run anyhow. We will still
eventually run it, when we reach the deadline of the earliest known
timerModifiedEarlier, although in practice that timer has likely been
removed. When we do run adjusttimers, we will reset all of the
timerModifiedEarlier timers and clear our notion of when we need to
run adjusttimers again.

The effect should be to significantly reduce the number of times we
walk through the timer list in adjusttimers.

Fixes #41699

Change-Id: I38eb2be611fb34e3017bb33d0a9ed40d75fb414f
Reviewed-on: https://go-review.googlesource.com/c/go/+/258303
Trust: Ian Lance Taylor <iant@golang.org>
Trust: Emmanuel Odeke <emmanuel@orijtech.com>
Run-TryBot: Ian Lance Taylor <iant@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
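[Editor's sketch] As a rough standalone illustration of the approach (not the runtime code, which keeps this state per P and uses the runtime-internal atomics), the sketch below tracks the earliest known deadline of a timer that was modified to run earlier, and only triggers the expensive adjustment pass once that deadline has been reached. The names earliestModified, noteModifiedEarlier, and shouldAdjust are hypothetical.

    package main

    import (
    	"fmt"
    	"sync/atomic"
    	"time"
    )

    // earliestModified is 0 when no modified-earlier timer is known.
    var earliestModified atomic.Int64

    // noteModifiedEarlier records when, unless an earlier deadline is
    // already known. This mirrors the CAS loop in updateTimerModifiedEarliest.
    func noteModifiedEarlier(when int64) {
    	for {
    		old := earliestModified.Load()
    		if old != 0 && old < when {
    			return // an earlier deadline is already recorded
    		}
    		if earliestModified.CompareAndSwap(old, when) {
    			return
    		}
    	}
    }

    // shouldAdjust reports whether the adjustment pass has to run at time now.
    // A real adjustment pass would reset earliestModified to 0 afterwards.
    func shouldAdjust(now int64) bool {
    	first := earliestModified.Load()
    	return first != 0 && first <= now
    }

    func main() {
    	now := time.Now().UnixNano()
    	noteModifiedEarlier(now + int64(50*time.Millisecond))
    	fmt.Println(shouldAdjust(now))                      // false: deadline not yet reached
    	fmt.Println(shouldAdjust(now + int64(time.Second))) // true: deadline has passed
    }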
-rw-r--r--  src/runtime/proc.go      | 52
-rw-r--r--  src/runtime/runtime2.go  |  7
-rw-r--r--  src/runtime/time.go      | 99
3 files changed, 88 insertions(+), 70 deletions(-)
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 6feecef985..87d4b6e568 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -3017,40 +3017,40 @@ func dropg() {
// We pass now in and out to avoid extra calls of nanotime.
//go:yeswritebarrierrec
func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
- // If there are no timers to adjust, and the first timer on
- // the heap is not yet ready to run, then there is nothing to do.
- if atomic.Load(&pp.adjustTimers) == 0 {
- next := int64(atomic.Load64(&pp.timer0When))
- if next == 0 {
- return now, 0, false
- }
- if now == 0 {
- now = nanotime()
- }
- if now < next {
- // Next timer is not ready to run.
- // But keep going if we would clear deleted timers.
- // This corresponds to the condition below where
- // we decide whether to call clearDeletedTimers.
- if pp != getg().m.p.ptr() || int(atomic.Load(&pp.deletedTimers)) <= int(atomic.Load(&pp.numTimers)/4) {
- return now, next, false
- }
+ // If it's not yet time for the first timer, or the first adjusted
+ // timer, then there is nothing to do.
+ next := int64(atomic.Load64(&pp.timer0When))
+ nextAdj := int64(atomic.Load64(&pp.timerModifiedEarliest))
+ if next == 0 || (nextAdj != 0 && nextAdj < next) {
+ next = nextAdj
+ }
+
+ if next == 0 {
+ // No timers to run or adjust.
+ return now, 0, false
+ }
+
+ if now == 0 {
+ now = nanotime()
+ }
+ if now < next {
+ // Next timer is not ready to run, but keep going
+ // if we would clear deleted timers.
+ // This corresponds to the condition below where
+ // we decide whether to call clearDeletedTimers.
+ if pp != getg().m.p.ptr() || int(atomic.Load(&pp.deletedTimers)) <= int(atomic.Load(&pp.numTimers)/4) {
+ return now, next, false
}
}
lock(&pp.timersLock)
- adjusttimers(pp)
-
- rnow = now
if len(pp.timers) > 0 {
- if rnow == 0 {
- rnow = nanotime()
- }
+ adjusttimers(pp, now)
for len(pp.timers) > 0 {
// Note that runtimer may temporarily unlock
// pp.timersLock.
- if tw := runtimer(pp, rnow); tw != 0 {
+ if tw := runtimer(pp, now); tw != 0 {
if tw > 0 {
pollUntil = tw
}
@@ -3069,7 +3069,7 @@ func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
unlock(&pp.timersLock)
- return rnow, pollUntil, ran
+ return now, pollUntil, ran
}
func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
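[Editor's sketch] The new fast path in checkTimers above folds two optional deadlines together, with 0 meaning "no deadline". A small standalone sketch of just that zero-handling follows; earliestDeadline is a hypothetical helper, not runtime code.

    package main

    import "fmt"

    // earliestDeadline returns the earlier of two deadlines, where 0 means
    // "no deadline"; it returns 0 only if both are 0.
    func earliestDeadline(next, nextAdj int64) int64 {
    	if next == 0 || (nextAdj != 0 && nextAdj < next) {
    		next = nextAdj
    	}
    	return next
    }

    func main() {
    	fmt.Println(earliestDeadline(0, 0))    // 0: nothing to do
    	fmt.Println(earliestDeadline(100, 0))  // 100: only the heap timer is set
    	fmt.Println(earliestDeadline(100, 50)) // 50: adjusted timer is earlier
    	fmt.Println(earliestDeadline(0, 50))   // 50: only the adjusted timer is set
    }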
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 7bac5fd38d..a2e4411c7d 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -646,6 +646,13 @@ type p struct {
// This is 0 if the timer heap is empty.
timer0When uint64
+ // The earliest known nextwhen field of a timer with
+ // timerModifiedEarlier status. Because the timer may have been
+ // modified again, there need not be any timer with this value.
+ // This is updated using atomic functions.
+ // This is 0 if the value is unknown.
+ timerModifiedEarliest uint64
+
// Per-P GC state
gcAssistTime int64 // Nanoseconds in assistAlloc
gcFractionalMarkTime int64 // Nanoseconds in fractional mark worker (atomic)
diff --git a/src/runtime/time.go b/src/runtime/time.go
index f895bf8443..99290f66d0 100644
--- a/src/runtime/time.go
+++ b/src/runtime/time.go
@@ -491,6 +491,8 @@ loop:
newStatus = timerModifiedEarlier
}
+ tpp := t.pp.ptr()
+
// Update the adjustTimers field. Subtract one if we
// are removing a timerModifiedEarlier, add one if we
// are adding a timerModifiedEarlier.
@@ -500,9 +502,10 @@ loop:
}
if newStatus == timerModifiedEarlier {
adjust++
+ updateTimerModifiedEarliest(tpp, when)
}
if adjust != 0 {
- atomic.Xadd(&t.pp.ptr().adjustTimers, adjust)
+ atomic.Xadd(&tpp.adjustTimers, adjust)
}
// Set the new status of the timer.
@@ -637,16 +640,36 @@ func moveTimers(pp *p, timers []*timer) {
// the correct place in the heap. While looking for those timers,
// it also moves timers that have been modified to run later,
// and removes deleted timers. The caller must have locked the timers for pp.
-func adjusttimers(pp *p) {
- if len(pp.timers) == 0 {
- return
- }
+func adjusttimers(pp *p, now int64) {
if atomic.Load(&pp.adjustTimers) == 0 {
if verifyTimers {
verifyTimerHeap(pp)
}
+ // There are no timers to adjust, so it is safe to clear
+ // timerModifiedEarliest. Do so in case it is stale.
+ // Everything will work if we don't do this,
+ // but clearing here may save future calls to adjusttimers.
+ atomic.Store64(&pp.timerModifiedEarliest, 0)
return
}
+
+ // If we haven't yet reached the time of the first timerModifiedEarlier
+ // timer, don't do anything. This speeds up programs that adjust
+ // a lot of timers back and forth if the timers rarely expire.
+ // We'll postpone looking through all the adjusted timers until
+ // one would actually expire.
+ if first := atomic.Load64(&pp.timerModifiedEarliest); first != 0 {
+ if int64(first) > now {
+ if verifyTimers {
+ verifyTimerHeap(pp)
+ }
+ return
+ }
+
+ // We are going to clear all timerModifiedEarlier timers.
+ atomic.Store64(&pp.timerModifiedEarliest, 0)
+ }
+
var moved []*timer
loop:
for i := 0; i < len(pp.timers); i++ {
@@ -868,6 +891,10 @@ func runOneTimer(pp *p, t *timer, now int64) {
//
// The caller must have locked the timers for pp.
func clearDeletedTimers(pp *p) {
+ // We are going to clear all timerModifiedEarlier timers.
+ // Do this now in case new ones show up while we are looping.
+ atomic.Store64(&pp.timerModifiedEarliest, 0)
+
cdel := int32(0)
cearlier := int32(0)
to := 0
@@ -977,6 +1004,21 @@ func updateTimer0When(pp *p) {
}
}
+// updateTimerModifiedEarliest updates the recorded nextwhen field of the
+// earliest known timerModifiedEarlier timer for pp.
+// The timers for pp will not be locked.
+func updateTimerModifiedEarliest(pp *p, nextwhen int64) {
+ for {
+ old := atomic.Load64(&pp.timerModifiedEarliest)
+ if old != 0 && int64(old) < nextwhen {
+ return
+ }
+ if atomic.Cas64(&pp.timerModifiedEarliest, old, uint64(nextwhen)) {
+ return
+ }
+ }
+}
+
// timeSleepUntil returns the time when the next timer should fire,
// and the P that holds the timer heap that that timer is on.
// This is only called by sysmon and checkdead.
@@ -993,48 +1035,17 @@ func timeSleepUntil() (int64, *p) {
continue
}
- c := atomic.Load(&pp.adjustTimers)
- if c == 0 {
- w := int64(atomic.Load64(&pp.timer0When))
- if w != 0 && w < next {
- next = w
- pret = pp
- }
- continue
+ w := int64(atomic.Load64(&pp.timer0When))
+ if w != 0 && w < next {
+ next = w
+ pret = pp
}
- lock(&pp.timersLock)
- for _, t := range pp.timers {
- switch s := atomic.Load(&t.status); s {
- case timerWaiting:
- if t.when < next {
- next = t.when
- }
- case timerModifiedEarlier, timerModifiedLater:
- if t.nextwhen < next {
- next = t.nextwhen
- }
- if s == timerModifiedEarlier {
- c--
- }
- }
- // The timers are sorted, so we only have to check
- // the first timer for each P, unless there are
- // some timerModifiedEarlier timers. The number
- // of timerModifiedEarlier timers is in the adjustTimers
- // field, used to initialize c, above.
- //
- // We don't worry about cases like timerModifying.
- // New timers can show up at any time,
- // so this function is necessarily imprecise.
- // Do a signed check here since we aren't
- // synchronizing the read of pp.adjustTimers
- // with the check of a timer status.
- if int32(c) <= 0 {
- break
- }
+ w = int64(atomic.Load64(&pp.timerModifiedEarliest))
+ if w != 0 && w < next {
+ next = w
+ pret = pp
}
- unlock(&pp.timersLock)
}
unlock(&allpLock)
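[Editor's sketch] The last hunk replaces the lock-and-walk over each P's timer heap in timeSleepUntil with two atomic loads per P. Below is a hedged sketch of that pattern; the pTimers type and nextWake helper are illustrative only and not part of the runtime.

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    // pTimers is an illustrative stand-in for the per-P fields read here.
    type pTimers struct {
    	timer0When            atomic.Int64 // when of the first timer on the heap; 0 if the heap is empty
    	timerModifiedEarliest atomic.Int64 // earliest known modified-earlier deadline; 0 if unknown
    }

    // nextWake returns the earliest deadline across all Ps, or maxWhen if none is set.
    func nextWake(ps []*pTimers, maxWhen int64) int64 {
    	next := maxWhen
    	for _, pp := range ps {
    		if w := pp.timer0When.Load(); w != 0 && w < next {
    			next = w
    		}
    		if w := pp.timerModifiedEarliest.Load(); w != 0 && w < next {
    			next = w
    		}
    	}
    	return next
    }

    func main() {
    	ps := []*pTimers{new(pTimers), new(pTimers)}
    	ps[0].timer0When.Store(200)
    	ps[1].timerModifiedEarliest.Store(150)
    	fmt.Println(nextWake(ps, 1<<62)) // 150
    }

Like the real change, this is deliberately conservative: the recorded modified-earlier deadline may belong to a timer that has since been modified again or removed, so the computed wake-up time can be earlier than necessary but never too late.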