path: root/src/runtime/panic.go
author    Keith Randall <keithr@alum.mit.edu>    2019-04-11 09:50:59 -0700
committer Keith Randall <khr@golang.org>         2019-06-04 17:35:20 +0000
commit    fff4f599fe1c21e411a99de5c9b3777d06ce0ce6
tree      8e048033606759d2e1ad5bb11085a5188ba3cdd0
parent    8343a0934df8f437938c55a6f0ff120c7c24a8bb
cmd/compile,runtime: allocate defer records on the stack
When a defer is executed at most once in a function body, we can
allocate the defer record for it on the stack instead of on the heap.
This should make defers like this (which are very common) faster.

This optimization applies to 363 out of the 370 static defer sites
in the cmd/go binary.

name     old time/op  new time/op  delta
Defer-4  52.2ns ± 5%  36.2ns ± 3%  -30.70%  (p=0.000 n=10+10)

Fixes #6980
Update #14939

Change-Id: I697109dd7aeef9e97a9eeba2ef65ff53d3ee1004
Reviewed-on: https://go-review.googlesource.com/c/go/+/171758
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
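For illustration, a minimal sketch of which defer sites qualify (the
functions below are hypothetical examples, not code from this CL): a
defer statement that executes at most once per call can get a
stack-allocated record, while a defer inside a loop may execute many
times per call and keeps the heap allocation path.

package deferdemo

import "os"

// f qualifies: its defer statement executes at most once per call,
// so the compiler can place the defer record in f's stack frame.
func f() error {
	src, err := os.Open("in.txt")
	if err != nil {
		return err
	}
	defer src.Close()
	// ... use src ...
	return nil
}

// g does not qualify: the defer statement executes once per loop
// iteration, so each record is still allocated on the heap.
func g(paths []string) {
	for _, p := range paths {
		fp, err := os.Open(p)
		if err != nil {
			continue
		}
		defer fp.Close()
		// ... use fp ...
	}
}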
Diffstat (limited to 'src/runtime/panic.go')
-rw-r--r--  src/runtime/panic.go  44
1 file changed, 44 insertions, 0 deletions
diff --git a/src/runtime/panic.go b/src/runtime/panic.go
index f39a4bc0a2..ce26eb540d 100644
--- a/src/runtime/panic.go
+++ b/src/runtime/panic.go
@@ -228,6 +228,46 @@ func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
// been set and must not be clobbered.
}
+// deferprocStack queues a new deferred function with a defer record on the stack.
+// The defer record must have its siz and fn fields initialized.
+// All other fields can contain junk.
+// The defer record must be immediately followed in memory by
+// the arguments of the defer.
+// Nosplit because the arguments on the stack won't be scanned
+// until the defer record is spliced into the gp._defer list.
+//go:nosplit
+func deferprocStack(d *_defer) {
+ gp := getg()
+ if gp.m.curg != gp {
+ // go code on the system stack can't defer
+ throw("defer on system stack")
+ }
+ // siz and fn are already set.
+ // The other fields are junk on entry to deferprocStack and
+ // are initialized here.
+ d.started = false
+ d.heap = false
+ d.sp = getcallersp()
+ d.pc = getcallerpc()
+ // The lines below implement:
+ // d.panic = nil
+ // d.link = gp._defer
+ // gp._defer = d
+ // But without write barriers. The first two are writes to
+ // the stack so they don't need a write barrier, and furthermore
+ // are to uninitialized memory, so they must not use a write barrier.
+ // The third write does not require a write barrier because we
+ // explicitly mark all the defer structures, so we don't need to
+ // keep track of pointers to them with a write barrier.
+ *(*uintptr)(unsafe.Pointer(&d._panic)) = 0
+ *(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
+ *(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))
+
+ return0()
+ // No code can go here - the C return register has
+ // been set and must not be clobbered.
+}
+
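Conceptually, a qualifying defer site compiles down to something like
the sketch below. This is hedged pseudocode for compiler-generated
code following the contract documented above; _defer is
runtime-internal, so user programs cannot write this directly.

// Pseudocode for what cmd/compile arranges at a qualifying defer site
// (a conceptual sketch, not literal compiler output):
//
//	var d _defer            // record lives in the caller's stack frame
//	d.siz = <arg bytes>     // only siz and fn are initialized here;
//	d.fn = <funcval for fn> // the other fields may hold junk
//	<copy the deferred call's arguments to just after d in memory>
//	deferprocStack(&d)      // splices d into gp._defer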
// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
// Each P holds a pool for defers with small arg sizes.
// Assign defer allocations to pools by rounding to 16, to match malloc size classes.
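As a hedged illustration of the rounding this comment describes (the
helper and package names here are ours; the runtime's real mapping is
deferclass, which appears in freedefer below):

package main

import "fmt"

// roundTo16 rounds a defer's argument size up to the next multiple of
// 16 bytes, matching the small malloc size classes mentioned above.
// (Illustrative only; not the runtime's actual deferclass code.)
func roundTo16(siz uintptr) uintptr {
	return (siz + 15) &^ 15
}

func main() {
	for _, siz := range []uintptr{0, 1, 16, 17, 40, 112} {
		fmt.Printf("%d -> %d\n", siz, roundTo16(siz))
	}
}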
@@ -349,6 +389,7 @@ func newdefer(siz int32) *_defer {
}
}
d.siz = siz
+ d.heap = true
d.link = gp._defer
gp._defer = d
return d
@@ -368,6 +409,9 @@ func freedefer(d *_defer) {
if d.fn != nil {
freedeferfn()
}
+ if !d.heap {
+ return
+ }
sc := deferclass(uintptr(d.siz))
if sc >= uintptr(len(p{}.deferpool)) {
return