aboutsummaryrefslogtreecommitdiff
path: root/test/heapsampling.go
diff options
context:
space:
mode:
authorRaul Silvera <rsilvera@google.com>2015-09-14 14:03:45 -0700
committerMinux Ma <minux@golang.org>2015-10-05 08:15:09 +0000
commit27ee719fb32b47b9bc59921e457f4b1e7f767968 (patch)
tree61845f2dc10b387dac8b4fe5c4ff7b5ae6fd4ad4 /test/heapsampling.go
parent0357c38adfd5d368390d82a2ab5b32748e4bb549 (diff)
downloadgo-27ee719fb32b47b9bc59921e457f4b1e7f767968.tar.gz
go-27ee719fb32b47b9bc59921e457f4b1e7f767968.zip
pprof: improve sampling for heap profiling
The current heap sampling introduces some bias that interferes with unsampling, producing unexpected heap profiles. The solution is to use a Poisson process to generate the sampling points, using the formulas described at https://en.wikipedia.org/wiki/Poisson_process This fixes #12620 Change-Id: If2400809ed3c41de504dd6cff06be14e476ff96c Reviewed-on: https://go-review.googlesource.com/14590 Reviewed-by: Keith Randall <khr@golang.org> Reviewed-by: Minux Ma <minux@golang.org> Run-TryBot: Minux Ma <minux@golang.org> TryBot-Result: Gobot Gobot <gobot@golang.org>
Diffstat (limited to 'test/heapsampling.go')
-rw-r--r--test/heapsampling.go166
1 files changed, 166 insertions, 0 deletions
diff --git a/test/heapsampling.go b/test/heapsampling.go
new file mode 100644
index 0000000000..d5ffc7f202
--- /dev/null
+++ b/test/heapsampling.go
@@ -0,0 +1,166 @@
+// run
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test heap sampling logic.
+
+package main
+
+import (
+ "fmt"
+ "math"
+ "runtime"
+)
+
+// Package-level sinks for the test allocations below.
+// NOTE(review): presumably assigning each new allocation to a global
+// keeps it live so the profiler attributes it to the allocating line
+// and the allocation cannot be optimized away — confirm.
+var a16 *[16]byte
+var a512 *[512]byte
+var a256 *[256]byte
+var a1k *[1024]byte
+var a64k *[64 * 1024]byte
+
+// This test checks that heap sampling produces reasonable
+// results. Note that heap sampling uses randomization, so the results
+// vary for run to run. This test only checks that the resulting
+// values appear reasonable.
+func main() {
+ const countInterleaved = 10000
+ allocInterleaved(countInterleaved)
+ checkAllocations(getMemProfileRecords(), "main.allocInterleaved", countInterleaved, []int64{256 * 1024, 1024, 256 * 1024, 512, 256 * 1024, 256})
+
+ const count = 100000
+ alloc(count)
+ checkAllocations(getMemProfileRecords(), "main.alloc", count, []int64{1024, 512, 256})
+}
+
+// allocInterleaved stress-tests the heap sampling logic by
+// interleaving large and small allocations.
+func allocInterleaved(n int) {
+ for i := 0; i < n; i++ {
+ // Test verification depends on these lines being contiguous.
+ a64k = new([64 * 1024]byte)
+ a1k = new([1024]byte)
+ a64k = new([64 * 1024]byte)
+ a512 = new([512]byte)
+ a64k = new([64 * 1024]byte)
+ a256 = new([256]byte)
+ }
+}
+
+// alloc performs only small allocations for sanity testing.
+func alloc(n int) {
+ for i := 0; i < n; i++ {
+ // Test verification depends on these lines being contiguous.
+ a1k = new([1024]byte)
+ a512 = new([512]byte)
+ a256 = new([256]byte)
+ }
+}
+
+// checkAllocations validates that the profile records collected for
+// the named function are consistent with count contiguous allocations
+// of the specified sizes.
+func checkAllocations(records []runtime.MemProfileRecord, fname string, count int64, size []int64) {
+ a := allocObjects(records, fname)
+ firstLine := 0
+ for ln := range a {
+ if firstLine == 0 || firstLine > ln {
+ firstLine = ln
+ }
+ }
+ var totalcount int64
+ for i, w := range size {
+ ln := firstLine + i
+ s := a[ln]
+ checkValue(fname, ln, "objects", count, s.objects)
+ checkValue(fname, ln, "bytes", count*w, s.bytes)
+ totalcount += s.objects
+ }
+ // Check the total number of allocations, to ensure some sampling occurred.
+ if totalwant := count * int64(len(size)); totalcount <= 0 || totalcount > totalwant*1024 {
+ panic(fmt.Sprintf("%s want total count > 0 && <= %d, got %d", fname, totalwant*1024, totalcount))
+ }
+}
+
// checkValue checks an unsampled value against a range: it must be
// non-negative and at most 1024x the expected value. The very wide
// tolerance allows for sampling randomization; out-of-range panics.
func checkValue(fname string, ln int, name string, want, got int64) {
	limit := 1024 * want
	if got < 0 || got > limit {
		panic(fmt.Sprintf("%s:%d want %s >= 0 && <= %d, got %d", fname, ln, name, limit, got))
	}
}
+
// getMemProfileRecords snapshots the current memory profile,
// including records with zero in-use bytes.
func getMemProfileRecords() []runtime.MemProfileRecord {
	// Ask for the record count first, then allocate a slightly larger
	// slice in case more records appear between the two calls. If the
	// profile grows past the slack, retry with the new count; the
	// loop normally finishes on its first iteration.
	n, ok := runtime.MemProfile(nil, true)
	for {
		records := make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(records, true)
		if ok {
			return records[:n]
		}
		// Profile grew; go around again.
	}
}
+
+// allocStat accumulates the byte and object totals attributed to a
+// single source line of the profiled function.
+type allocStat struct {
+	bytes, objects int64
+}
+
+// allocObjects examines the profile records for the named function
+// and returns the allocation stats aggregated by source line number.
+func allocObjects(records []runtime.MemProfileRecord, function string) map[int]allocStat {
+ a := make(map[int]allocStat)
+ for _, r := range records {
+ for _, s := range r.Stack0 {
+ if s == 0 {
+ break
+ }
+ if f := runtime.FuncForPC(s); f != nil {
+ name := f.Name()
+ _, line := f.FileLine(s)
+ if name == function {
+ allocStat := a[line]
+ allocStat.bytes += r.AllocBytes
+ allocStat.objects += r.AllocObjects
+ a[line] = allocStat
+ }
+ }
+ }
+ }
+ for line, stats := range a {
+ objects, bytes := scaleHeapSample(stats.objects, stats.bytes, int64(runtime.MemProfileRate))
+ a[line] = allocStat{bytes, objects}
+ }
+ return a
+}
+
// scaleHeapSample unsamples heap allocations, estimating true totals
// from sampled counts.
// Taken from src/cmd/pprof/internal/profile/legacy_profile.go
func scaleHeapSample(count, size, rate int64) (int64, int64) {
	switch {
	case count == 0 || size == 0:
		return 0, 0
	case rate <= 1:
		// rate==1 means every allocation was collected, so nothing
		// needs adjusting; rate<1 is treated as unknown and skipped.
		return count, size
	}

	// An allocation of average size avgSize is sampled with
	// probability 1-exp(-avgSize/rate); dividing by that probability
	// compensates for the allocations that went unsampled.
	avgSize := float64(size) / float64(count)
	scale := 1 / (1 - math.Exp(-avgSize/float64(rate)))
	return int64(float64(count) * scale), int64(float64(size) * scale)
}