author    Josh Bleecher Snyder <josharian@gmail.com>  2018-04-01 16:39:34 -0700
committer Josh Bleecher Snyder <josharian@gmail.com>  2018-11-09 16:06:56 +0000
commit    7d6b5e340c9c89416ffbf3e0bc997b77304c31f7
tree      2e34842f1fbb85ae148ab3d1f7b5178f45295298  /src/runtime/symtab.go
parent    fcd1ec0c822b31b0849e98941b54a12a3431bedd
runtime: reduce linear search through pcvalue cache
This change introduces two optimizations together, one for recursive
and one for non-recursive stacks.

For recursive stacks, we introduce the new entry at the beginning of
the cache, so it can be found first. This adds an extra read and write.
While we're here, switch from fastrandn, which does a multiply, to
fastrand % n, which does a shift.

For non-recursive stacks, split the cache from [16]pcvalueCacheEnt
into [2][8]pcvalueCacheEnt, and add a very cheap associative lookup.

name                old time/op  new time/op  delta
StackCopyPtr-8       118ms ± 1%   106ms ± 2%  -9.56%  (p=0.000 n=17+18)
StackCopy-8         95.8ms ± 1%  87.0ms ± 3%  -9.11%  (p=0.000 n=19+20)
StackCopyNoCache-8   135ms ± 2%   139ms ± 1%  +3.06%  (p=0.000 n=19+18)

During make.bash, the association function used has this return
distribution:

percent  count   return value
 53.23%  678797  1
 46.74%  596094  0

It is definitely not perfect, but it is pretty good, and that's all we need.

Change-Id: I2cabb1d26b99c5111bc28f427016a2a5e6c620fd
Reviewed-on: https://go-review.googlesource.com/c/110564
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
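For readers who want to see the two pieces together outside the patch below, here is a minimal standalone Go sketch of the design: a [2][8] cache addressed by a cheap key, lookups that scan only one row, and insertion at the head of the row with pseudo-random eviction. The names (cache, cacheEnt, cacheKey, insert, lookup) and the fixed 8-byte pointer size are simplifications for illustration, not the runtime's actual code.

package main

import "fmt"

// ptrSize stands in for sys.PtrSize; 8 assumes a 64-bit target.
const ptrSize = 8

// cacheEnt mirrors pcvalueCacheEnt: the (targetpc, off) key plus the cached value.
type cacheEnt struct {
	targetpc uintptr
	off      int32
	val      int32
}

// cache mirrors the patched pcvalueCache: two rows of eight entries each,
// with the row chosen by a very cheap function of targetpc.
type cache struct {
	entries [2][8]cacheEnt
}

// cacheKey mirrors pcvalueCacheKey: align targetpc to the pointer size,
// then reduce mod the number of rows.
func cacheKey(targetpc uintptr) uintptr {
	return (targetpc / ptrSize) % uintptr(len(cache{}.entries))
}

// lookup scans only the row selected by cacheKey, checking off first,
// as the patched pcvalue does.
func (c *cache) lookup(targetpc uintptr, off int32) (int32, bool) {
	row := &c.entries[cacheKey(targetpc)]
	for i := range row {
		if row[i].off == off && row[i].targetpc == targetpc {
			return row[i].val, true
		}
	}
	return -1, false
}

// insert places the new entry at the head of its row (it is the most likely
// to be used again soon) and moves a pseudo-randomly chosen victim into the
// head's old slot, approximating the patch's fastrand()%n eviction.
func (c *cache) insert(targetpc uintptr, off, val int32, rnd uint32) {
	row := &c.entries[cacheKey(targetpc)]
	ci := rnd % uint32(len(row))
	row[ci] = row[0]
	row[0] = cacheEnt{targetpc: targetpc, off: off, val: val}
}

func main() {
	var c cache
	c.insert(0x4521f0, 4, 123, 5)
	fmt.Println(c.lookup(0x4521f0, 4)) // 123 true
}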
Diffstat (limited to 'src/runtime/symtab.go')
-rw-r--r--  src/runtime/symtab.go | 24
1 file changed, 19 insertions(+), 5 deletions(-)
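The commit message quotes the row-selection distribution observed during make.bash (roughly 53%/47%). The toy Go program below only illustrates how one could sanity-check that a key of the form (pc / ptrSize) % 2 spreads addresses fairly evenly; the starting address and step sizes are made up, and this is not the original measurement.

package main

import (
	"fmt"
	"math/rand"
)

// ptrSize stands in for sys.PtrSize; 8 assumes a 64-bit target.
const ptrSize = 8

// rowFor mirrors the shape of pcvalueCacheKey for a two-row cache.
func rowFor(pc uintptr) uintptr {
	return (pc / ptrSize) % 2
}

func main() {
	// Walk a synthetic stream of "PCs" with variable-length steps,
	// loosely imitating variable instruction lengths, and count how
	// often each row is selected.
	var counts [2]int
	pc := uintptr(0x401000) // arbitrary starting address
	for i := 0; i < 1000000; i++ {
		counts[rowFor(pc)]++
		pc += uintptr(1 + rand.Intn(15)) // made-up instruction lengths 1..15
	}
	fmt.Printf("row 0: %d\nrow 1: %d\n", counts[0], counts[1])
}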
diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go
index 1dc7ab740e..edda45c669 100644
--- a/src/runtime/symtab.go
+++ b/src/runtime/symtab.go
@@ -698,7 +698,7 @@ func findfunc(pc uintptr) funcInfo {
}
type pcvalueCache struct {
- entries [16]pcvalueCacheEnt
+ entries [2][8]pcvalueCacheEnt
}
type pcvalueCacheEnt struct {
@@ -709,6 +709,14 @@ type pcvalueCacheEnt struct {
val int32
}
+// pcvalueCacheKey returns the outermost index in a pcvalueCache to use for targetpc.
+// It must be very cheap to calculate.
+// For now, align to sys.PtrSize and reduce mod the number of entries.
+// In practice, this appears to be fairly randomly and evenly distributed.
+func pcvalueCacheKey(targetpc uintptr) uintptr {
+ return (targetpc / sys.PtrSize) % uintptr(len(pcvalueCache{}.entries))
+}
+
func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, strict bool) int32 {
if off == 0 {
return -1
@@ -721,13 +729,14 @@ func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, stric
// cheaper than doing the hashing for a less associative
// cache.
if cache != nil {
- for i := range cache.entries {
+ x := pcvalueCacheKey(targetpc)
+ for i := range cache.entries[x] {
// We check off first because we're more
// likely to have multiple entries with
// different offsets for the same targetpc
// than the other way around, so we'll usually
// fail in the first clause.
- ent := &cache.entries[i]
+ ent := &cache.entries[x][i]
if ent.off == off && ent.targetpc == targetpc {
return ent.val
}
@@ -756,9 +765,14 @@ func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, stric
// replacement prevents a performance cliff if
// a recursive stack's cycle is slightly
// larger than the cache.
+ // Put the new element at the beginning,
+ // since it is the most likely to be newly used.
if cache != nil {
- ci := fastrandn(uint32(len(cache.entries)))
- cache.entries[ci] = pcvalueCacheEnt{
+ x := pcvalueCacheKey(targetpc)
+ e := &cache.entries[x]
+ ci := fastrand() % uint32(len(cache.entries[x]))
+ e[ci] = e[0]
+ e[0] = pcvalueCacheEnt{
targetpc: targetpc,
off: off,
val: val,