aboutsummaryrefslogtreecommitdiff
path: root/src/cmd/pprof
diff options
context:
space:
mode:
authorRuss Cox <rsc@golang.org>2016-10-31 12:09:03 -0400
committerRuss Cox <rsc@golang.org>2016-11-02 19:09:21 +0000
commitd5b97f614eb02399f7b4ed6615fae094362d151d (patch)
tree25ad6f060a4a3f571b98ef3195f0550e3fa115ce /src/cmd/pprof
parent682ffae6db749ba63df4b8bc1739974346bb14d7 (diff)
downloadgo-d5b97f614eb02399f7b4ed6615fae094362d151d.tar.gz
go-d5b97f614eb02399f7b4ed6615fae094362d151d.zip
cmd/pprof: move cmd/internal/pprof back to cmd/pprof/internal
CL 21870 moved the entire cmd/pprof/internal directory to cmd/internal/pprof for use by cmd/trace, but really cmd/trace only needed cmd/pprof/internal/profile, which became cmd/internal/pprof/profile, and then internal/pprof/profile. Move the rest back under cmd/pprof so that it is clear that no other code is reaching into the guts of cmd/pprof. Just like functions should not be exported unless necessary, internals should not be made visible to more code than necessary. Raúl Silvera noted after the commit of CL 21870 that only the profile package should have moved, but there was no followup fix (until now). Change-Id: I603f4dcb0616df1e5d5eb7372e6fccda57e05079 Reviewed-on: https://go-review.googlesource.com/32453 Run-TryBot: Russ Cox <rsc@golang.org> TryBot-Result: Gobot Gobot <gobot@golang.org> Reviewed-by: Ian Lance Taylor <iant@golang.org>
Diffstat (limited to 'src/cmd/pprof')
-rw-r--r--src/cmd/pprof/internal/commands/commands.go235
-rw-r--r--src/cmd/pprof/internal/driver/driver.go1039
-rw-r--r--src/cmd/pprof/internal/driver/interactive.go492
-rw-r--r--src/cmd/pprof/internal/fetch/fetch.go82
-rw-r--r--src/cmd/pprof/internal/plugin/plugin.go213
-rw-r--r--src/cmd/pprof/internal/report/report.go1726
-rw-r--r--src/cmd/pprof/internal/report/source.go454
-rw-r--r--src/cmd/pprof/internal/report/source_html.go77
-rw-r--r--src/cmd/pprof/internal/svg/svg.go71
-rw-r--r--src/cmd/pprof/internal/svg/svgpan.go291
-rw-r--r--src/cmd/pprof/internal/symbolizer/symbolizer.go195
-rw-r--r--src/cmd/pprof/internal/symbolz/symbolz.go111
-rw-r--r--src/cmd/pprof/internal/tempfile/tempfile.go46
-rw-r--r--src/cmd/pprof/pprof.go12
14 files changed, 5038 insertions, 6 deletions
diff --git a/src/cmd/pprof/internal/commands/commands.go b/src/cmd/pprof/internal/commands/commands.go
new file mode 100644
index 0000000000..143b7c21bc
--- /dev/null
+++ b/src/cmd/pprof/internal/commands/commands.go
@@ -0,0 +1,235 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package commands defines and manages the basic pprof commands
+package commands
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "strings"
+ "time"
+
+ "cmd/internal/browser"
+ "cmd/pprof/internal/plugin"
+ "cmd/pprof/internal/report"
+ "cmd/pprof/internal/svg"
+ "cmd/pprof/internal/tempfile"
+)
+
// Commands describes the commands accepted by pprof, keyed by command name.
type Commands map[string]*Command

// Command describes the actions for a pprof command. Includes a
// function for command-line completion, the report format to use
// during report generation, any postprocessing functions, and whether
// the command expects a regexp parameter (typically a function name).
type Command struct {
	Complete    Completer     // autocomplete for interactive mode
	Format      int           // report format to generate (a report.* constant)
	PostProcess PostProcessor // postprocessing to run on report; nil for none
	HasParam    bool          // collect a parameter from the CLI
	Usage       string        // help text shown in usage listings
}

// Completer is a function for command-line autocompletion: it receives
// a prefix and returns the completed text.
type Completer func(prefix string) string

// PostProcessor is a function that applies post-processing to the report
// output before it reaches its final destination.
type PostProcessor func(input *bytes.Buffer, output io.Writer, ui plugin.UI) error
+
+// PProf returns the basic pprof report-generation commands
+func PProf(c Completer, interactive **bool) Commands {
+ return Commands{
+ // Commands that require no post-processing.
+ "tags": {nil, report.Tags, nil, false, "Outputs all tags in the profile"},
+ "raw": {c, report.Raw, nil, false, "Outputs a text representation of the raw profile"},
+ "dot": {c, report.Dot, nil, false, "Outputs a graph in DOT format"},
+ "top": {c, report.Text, nil, false, "Outputs top entries in text form"},
+ "tree": {c, report.Tree, nil, false, "Outputs a text rendering of call graph"},
+ "text": {c, report.Text, nil, false, "Outputs top entries in text form"},
+ "disasm": {c, report.Dis, nil, true, "Output annotated assembly for functions matching regexp or address"},
+ "list": {c, report.List, nil, true, "Output annotated source for functions matching regexp"},
+ "peek": {c, report.Tree, nil, true, "Output callers/callees of functions matching regexp"},
+
+ // Save binary formats to a file
+ "callgrind": {c, report.Callgrind, awayFromTTY("callgraph.out"), false, "Outputs a graph in callgrind format"},
+ "proto": {c, report.Proto, awayFromTTY("pb.gz"), false, "Outputs the profile in compressed protobuf format"},
+
+ // Generate report in DOT format and postprocess with dot
+ "gif": {c, report.Dot, invokeDot("gif"), false, "Outputs a graph image in GIF format"},
+ "pdf": {c, report.Dot, invokeDot("pdf"), false, "Outputs a graph in PDF format"},
+ "png": {c, report.Dot, invokeDot("png"), false, "Outputs a graph image in PNG format"},
+ "ps": {c, report.Dot, invokeDot("ps"), false, "Outputs a graph in PS format"},
+
+ // Save SVG output into a file after including svgpan library
+ "svg": {c, report.Dot, saveSVGToFile(), false, "Outputs a graph in SVG format"},
+
+ // Visualize postprocessed dot output
+ "eog": {c, report.Dot, invokeVisualizer(interactive, invokeDot("svg"), "svg", []string{"eog"}), false, "Visualize graph through eog"},
+ "evince": {c, report.Dot, invokeVisualizer(interactive, invokeDot("pdf"), "pdf", []string{"evince"}), false, "Visualize graph through evince"},
+ "gv": {c, report.Dot, invokeVisualizer(interactive, invokeDot("ps"), "ps", []string{"gv --noantialias"}), false, "Visualize graph through gv"},
+ "web": {c, report.Dot, invokeVisualizer(interactive, saveSVGToFile(), "svg", browsers()), false, "Visualize graph through web browser"},
+
+ // Visualize HTML directly generated by report.
+ "weblist": {c, report.WebList, invokeVisualizer(interactive, awayFromTTY("html"), "html", browsers()), true, "Output annotated source in HTML for functions matching regexp or address"},
+ }
+}
+
+// browsers returns a list of commands to attempt for web visualization
+// on the current platform
+func browsers() []string {
+ var cmds []string
+ for _, cmd := range browser.Commands() {
+ cmds = append(cmds, strings.Join(cmd, " "))
+ }
+ return cmds
+}
+
+// NewCompleter creates an autocompletion function for a set of commands.
+func NewCompleter(cs Commands) Completer {
+ return func(line string) string {
+ switch tokens := strings.Fields(line); len(tokens) {
+ case 0:
+ // Nothing to complete
+ case 1:
+ // Single token -- complete command name
+ found := ""
+ for c := range cs {
+ if strings.HasPrefix(c, tokens[0]) {
+ if found != "" {
+ return line
+ }
+ found = c
+ }
+ }
+ if found != "" {
+ return found
+ }
+ default:
+ // Multiple tokens -- complete using command completer
+ if c, ok := cs[tokens[0]]; ok {
+ if c.Complete != nil {
+ lastTokenIdx := len(tokens) - 1
+ lastToken := tokens[lastTokenIdx]
+ if strings.HasPrefix(lastToken, "-") {
+ lastToken = "-" + c.Complete(lastToken[1:])
+ } else {
+ lastToken = c.Complete(lastToken)
+ }
+ return strings.Join(append(tokens[:lastTokenIdx], lastToken), " ")
+ }
+ }
+ }
+ return line
+ }
+}
+
+// awayFromTTY saves the output in a file if it would otherwise go to
+// the terminal screen. This is used to avoid dumping binary data on
+// the screen.
+func awayFromTTY(format string) PostProcessor {
+ return func(input *bytes.Buffer, output io.Writer, ui plugin.UI) error {
+ if output == os.Stdout && ui.IsTerminal() {
+ tempFile, err := tempfile.New("", "profile", "."+format)
+ if err != nil {
+ return err
+ }
+ ui.PrintErr("Generating report in ", tempFile.Name())
+ _, err = fmt.Fprint(tempFile, input)
+ return err
+ }
+ _, err := fmt.Fprint(output, input)
+ return err
+ }
+}
+
+func invokeDot(format string) PostProcessor {
+ divert := awayFromTTY(format)
+ return func(input *bytes.Buffer, output io.Writer, ui plugin.UI) error {
+ if _, err := exec.LookPath("dot"); err != nil {
+ ui.PrintErr("Cannot find dot, have you installed Graphviz?")
+ return err
+ }
+ cmd := exec.Command("dot", "-T"+format)
+ var buf bytes.Buffer
+ cmd.Stdin, cmd.Stdout, cmd.Stderr = input, &buf, os.Stderr
+ if err := cmd.Run(); err != nil {
+ return err
+ }
+ return divert(&buf, output, ui)
+ }
+}
+
+func saveSVGToFile() PostProcessor {
+ generateSVG := invokeDot("svg")
+ divert := awayFromTTY("svg")
+ return func(input *bytes.Buffer, output io.Writer, ui plugin.UI) error {
+ baseSVG := &bytes.Buffer{}
+ generateSVG(input, baseSVG, ui)
+ massaged := &bytes.Buffer{}
+ fmt.Fprint(massaged, svg.Massage(*baseSVG))
+ return divert(massaged, output, ui)
+ }
+}
+
+var vizTmpDir string
+
+func makeVizTmpDir() error {
+ if vizTmpDir != "" {
+ return nil
+ }
+ name, err := ioutil.TempDir("", "pprof-")
+ if err != nil {
+ return err
+ }
+ tempfile.DeferDelete(name)
+ vizTmpDir = name
+ return nil
+}
+
// invokeVisualizer returns a PostProcessor that writes the formatted
// report to a temporary file and launches the first external viewer
// from visualizers that starts successfully on it. interactive is a
// pointer to the interactive-mode flag pointer, so the value read here
// reflects any later replacement of that flag by the driver (see
// processFlags), not the value at command-table construction time.
func invokeVisualizer(interactive **bool, format PostProcessor, suffix string, visualizers []string) PostProcessor {
	return func(input *bytes.Buffer, output io.Writer, ui plugin.UI) error {
		if err := makeVizTmpDir(); err != nil {
			return err
		}
		tempFile, err := tempfile.New(vizTmpDir, "pprof", "."+suffix)
		if err != nil {
			return err
		}
		tempfile.DeferDelete(tempFile.Name())
		// Render the report into the temp file using the wrapped
		// post-processor (e.g. invokeDot or awayFromTTY).
		if err = format(input, tempFile, ui); err != nil {
			return err
		}
		tempFile.Close() // on windows, if the file is Open, start cannot access it.
		// Try visualizers until one is successful
		for _, v := range visualizers {
			// Separate command and arguments for exec.Command.
			args := strings.Split(v, " ")
			if len(args) == 0 {
				continue
			}
			viewer := exec.Command(args[0], append(args[1:], tempFile.Name())...)
			viewer.Stderr = os.Stderr
			if err = viewer.Start(); err == nil {
				// The viewer might just send a message to another program
				// to open the file. Give that program a little time to open the
				// file before we remove it.
				time.Sleep(1 * time.Second)

				if !**interactive {
					// In command-line mode, wait for the viewer to be closed
					// before proceeding
					return viewer.Wait()
				}
				return nil
			}
		}
		// All viewers failed to start; err holds the last Start error
		// (or the nil from a successful earlier step if visualizers is
		// empty).
		return err
	}
}
diff --git a/src/cmd/pprof/internal/driver/driver.go b/src/cmd/pprof/internal/driver/driver.go
new file mode 100644
index 0000000000..344d458e13
--- /dev/null
+++ b/src/cmd/pprof/internal/driver/driver.go
@@ -0,0 +1,1039 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package driver implements the core pprof functionality. It can be
+// parameterized with a flag implementation, fetch and symbolize
+// mechanisms.
+package driver
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "cmd/pprof/internal/commands"
+ "cmd/pprof/internal/plugin"
+ "cmd/pprof/internal/report"
+ "cmd/pprof/internal/tempfile"
+ "internal/pprof/profile"
+)
+
// PProf acquires a profile, and symbolizes it using a profile
// manager. Then it generates a report formatted according to the
// options selected through the flags package.
//
// Multiple profile sources are fetched concurrently and merged into a
// single profile; if any fetch fails, only one of the errors (the last
// one recorded) is returned.
func PProf(flagset plugin.FlagSet, fetch plugin.Fetcher, sym plugin.Symbolizer, obj plugin.ObjTool, ui plugin.UI, overrides commands.Commands) error {
	// Remove any temporary files created during pprof processing.
	defer tempfile.Cleanup()

	f, err := getFlags(flagset, overrides, ui)
	if err != nil {
		return err
	}

	obj.SetConfig(*f.flagTools)

	sources := f.profileSource
	if len(sources) > 1 {
		source := sources[0]
		// If the first argument is a supported object file, treat as executable.
		if file, err := obj.Open(source, 0); err == nil {
			file.Close()
			f.profileExecName = source
			sources = sources[1:]
		} else if *f.flagBuildID == "" && isBuildID(source) {
			// Otherwise, a leading hex string is taken as a build ID
			// unless one was given explicitly with -buildid.
			f.flagBuildID = &source
			sources = sources[1:]
		}
	}

	// errMu protects concurrent accesses to errset and err. errset is set if an
	// error is encountered by one of the goroutines grabbing a profile.
	errMu, errset := sync.Mutex{}, false

	// Fetch profiles.
	wg := sync.WaitGroup{}
	profs := make([]*profile.Profile, len(sources))
	for i, source := range sources {
		wg.Add(1)
		go func(i int, src string) {
			defer wg.Done()
			p, grabErr := grabProfile(src, f.profileExecName, *f.flagBuildID, fetch, sym, obj, ui, f)
			if grabErr != nil {
				errMu.Lock()
				defer errMu.Unlock()
				// Later failures overwrite err; only one error survives.
				errset, err = true, grabErr
				return
			}
			// Each goroutine writes a distinct index, so no lock needed.
			profs[i] = p
		}(i, source)
	}
	wg.Wait()
	if errset {
		return err
	}

	// Merge profiles.
	prof := profs[0]
	for _, p := range profs[1:] {
		if err = prof.Merge(p, 1); err != nil {
			return err
		}
	}

	if *f.flagBase != "" {
		// Fetch base profile and subtract from current profile
		// (merge with weight -1).
		base, err := grabProfile(*f.flagBase, f.profileExecName, *f.flagBuildID, fetch, sym, obj, ui, f)
		if err != nil {
			return err
		}

		if err = prof.Merge(base, -1); err != nil {
			return err
		}
	}

	if err := processFlags(prof, ui, f); err != nil {
		return err
	}

	if !*f.flagRuntime {
		// Drop runtime call frames unless -runtime was given.
		prof.RemoveUninteresting()
	}

	if *f.flagInteractive {
		return interactive(prof, obj, ui, f)
	}

	return generate(false, prof, obj, ui, f)
}
+
// isBuildID determines if the profile may contain a build ID, by
// checking that it is a string of hex digits. Note that the empty
// string trivially satisfies this check.
func isBuildID(id string) bool {
	const hexDigits = "0123456789abcdefABCDEF"
	return strings.TrimLeft(id, hexDigits) == ""
}
+
// adjustURL updates the profile source URL based on heuristics. It
// will append ?seconds=sec for CPU profiles if not already
// specified. Returns the hostname if the profile is remote.
// A local file path is returned unchanged with empty host and zero
// duration.
func adjustURL(source string, sec int, ui plugin.UI) (adjusted, host string, duration time.Duration) {
	// If there is a local file with this name, just use it.
	if _, err := os.Stat(source); err == nil {
		return source, "", 0
	}

	url, err := url.Parse(source)

	// Automatically add http:// to URLs of the form hostname:port/path.
	// url.Parse treats "hostname" as the Scheme.
	if err != nil || (url.Host == "" && url.Scheme != "" && url.Scheme != "file") {
		url, err = url.Parse("http://" + source)
		if err != nil {
			return source, "", 0
		}
	}
	if scheme := strings.ToLower(url.Scheme); scheme == "" || scheme == "file" {
		// file: URLs and bare paths are treated as local; strip the scheme.
		url.Scheme = ""
		return url.String(), "", 0
	}

	// A seconds parameter in the URL overrides the -seconds flag.
	values := url.Query()
	if urlSeconds := values.Get("seconds"); urlSeconds != "" {
		if us, err := strconv.ParseInt(urlSeconds, 10, 32); err == nil {
			if sec >= 0 {
				ui.PrintErr("Overriding -seconds for URL ", source)
			}
			sec = int(us)
		}
	}

	switch strings.ToLower(url.Path) {
	case "", "/":
		// Apply default /profilez.
		url.Path = "/profilez"
	case "/protoz":
		// Rewrite to /profilez?type=proto
		url.Path = "/profilez"
		values.Set("type", "proto")
	}

	// Only timed (CPU-style) endpoints get a seconds parameter and a
	// nonzero duration.
	if hasDuration(url.Path) {
		if sec > 0 {
			duration = time.Duration(sec) * time.Second
			values.Set("seconds", fmt.Sprintf("%d", sec))
		} else {
			// Assume default duration: 30 seconds
			duration = 30 * time.Second
		}
	}
	url.RawQuery = values.Encode()
	return url.String(), url.Host, duration
}
+
// hasDuration reports whether the URL path names a profile endpoint
// that collects data over a period of time (and therefore accepts a
// seconds parameter).
func hasDuration(path string) bool {
	return strings.Contains(path, "profilez") ||
		strings.Contains(path, "wallz") ||
		strings.Contains(path, "/profile")
}
+
+// preprocess does filtering and aggregation of a profile based on the
+// requested options.
+func preprocess(prof *profile.Profile, ui plugin.UI, f *flags) error {
+ if *f.flagFocus != "" || *f.flagIgnore != "" || *f.flagHide != "" {
+ focus, ignore, hide, err := compileFocusIgnore(*f.flagFocus, *f.flagIgnore, *f.flagHide)
+ if err != nil {
+ return err
+ }
+ fm, im, hm := prof.FilterSamplesByName(focus, ignore, hide)
+
+ warnNoMatches(fm, *f.flagFocus, "Focus", ui)
+ warnNoMatches(im, *f.flagIgnore, "Ignore", ui)
+ warnNoMatches(hm, *f.flagHide, "Hide", ui)
+ }
+
+ if *f.flagTagFocus != "" || *f.flagTagIgnore != "" {
+ focus, err := compileTagFilter(*f.flagTagFocus, ui)
+ if err != nil {
+ return err
+ }
+ ignore, err := compileTagFilter(*f.flagTagIgnore, ui)
+ if err != nil {
+ return err
+ }
+ fm, im := prof.FilterSamplesByTag(focus, ignore)
+
+ warnNoMatches(fm, *f.flagTagFocus, "TagFocus", ui)
+ warnNoMatches(im, *f.flagTagIgnore, "TagIgnore", ui)
+ }
+
+ return aggregate(prof, f)
+}
+
// compileFocusIgnore compiles the focus, ignore and hide expressions
// into regexps. Empty expressions yield nil regexps; the first
// compilation failure aborts with a descriptive error.
func compileFocusIgnore(focus, ignore, hide string) (f, i, h *regexp.Regexp, err error) {
	compile := func(expr, what string) (*regexp.Regexp, error) {
		if expr == "" {
			return nil, nil
		}
		rx, err := regexp.Compile(expr)
		if err != nil {
			return nil, fmt.Errorf("parsing %s regexp: %v", what, err)
		}
		return rx, nil
	}

	if f, err = compile(focus, "focus"); err != nil {
		return nil, nil, nil, err
	}
	if i, err = compile(ignore, "ignore"); err != nil {
		return nil, nil, nil, err
	}
	if h, err = compile(hide, "hide"); err != nil {
		return nil, nil, nil, err
	}
	return f, i, h, nil
}
+
// compileTagFilter translates a tag filter expression into a matcher
// func(key, value, num) bool. If the expression parses as a numeric
// range (see parseTagFilterRange), the matcher applies only to numeric
// tags (those with an empty string value); otherwise the expression is
// compiled as a regexp matched against "key:value" for string tags.
// An empty filter yields a nil matcher and nil error.
func compileTagFilter(filter string, ui plugin.UI) (f func(string, string, int64) bool, err error) {
	if filter == "" {
		return nil, nil
	}
	if numFilter := parseTagFilterRange(filter); numFilter != nil {
		ui.PrintErr("Interpreted '", filter, "' as range, not regexp")
		return func(key, val string, num int64) bool {
			if val != "" {
				// String-valued tags never match a numeric range.
				return false
			}
			return numFilter(num, key)
		}, nil
	}
	fx, err := regexp.Compile(filter)
	if err != nil {
		return nil, err
	}

	return func(key, val string, num int64) bool {
		if val == "" {
			// Numeric tags never match a regexp filter.
			return false
		}
		return fx.MatchString(key + ":" + val)
	}, nil
}
+
// tagFilterRangeRx matches one "<number><unit>" endpoint of a range
// expression, e.g. "32kb".
var tagFilterRangeRx = regexp.MustCompile("([[:digit:]]+)([[:alpha:]]+)")

// parseTagFilterRange returns a function to checks if a value is
// contained on the range described by a string. It can recognize
// strings of the form:
// "32kb" -- matches values == 32kb
// ":64kb" -- matches values <= 64kb
// "4mb:" -- matches values >= 4mb
// "12kb:64mb" -- matches values between 12kb and 64mb (both included).
//
// Returns nil if the string is not a range expression. Panics if a
// matched number fails to parse, which should be impossible given the
// digit-only capture group.
func parseTagFilterRange(filter string) func(int64, string) bool {
	// At most two endpoints are meaningful.
	ranges := tagFilterRangeRx.FindAllStringSubmatch(filter, 2)
	if len(ranges) == 0 {
		return nil // No ranges were identified
	}
	v, err := strconv.ParseInt(ranges[0][1], 10, 64)
	if err != nil {
		panic(fmt.Errorf("Failed to parse int %s: %v", ranges[0][1], err))
	}
	// Normalize the endpoint to its own unit; comparisons below rescale
	// candidate values into this unit via report.ScaleValue.
	value, unit := report.ScaleValue(v, ranges[0][2], ranges[0][2])
	if len(ranges) == 1 {
		// Single endpoint: decide ==, >= or <= by where the colon sits.
		switch match := ranges[0][0]; filter {
		case match:
			return func(v int64, u string) bool {
				sv, su := report.ScaleValue(v, u, unit)
				return su == unit && sv == value
			}
		case match + ":":
			return func(v int64, u string) bool {
				sv, su := report.ScaleValue(v, u, unit)
				return su == unit && sv >= value
			}
		case ":" + match:
			return func(v int64, u string) bool {
				sv, su := report.ScaleValue(v, u, unit)
				return su == unit && sv <= value
			}
		}
		// Extra characters around the endpoint: not a range.
		return nil
	}
	// Two endpoints must be exactly "<lo>:<hi>".
	if filter != ranges[0][0]+":"+ranges[1][0] {
		return nil
	}
	if v, err = strconv.ParseInt(ranges[1][1], 10, 64); err != nil {
		panic(fmt.Errorf("Failed to parse int %s: %v", ranges[1][1], err))
	}
	// Both endpoints must share a unit after scaling.
	value2, unit2 := report.ScaleValue(v, ranges[1][2], unit)
	if unit != unit2 {
		return nil
	}
	return func(v int64, u string) bool {
		sv, su := report.ScaleValue(v, u, unit)
		return su == unit && sv >= value && sv <= value2
	}
}
+
+func warnNoMatches(match bool, rx, option string, ui plugin.UI) {
+ if !match && rx != "" && rx != "." {
+ ui.PrintErr(option + " expression matched no samples: " + rx)
+ }
+}
+
// grabProfile fetches and symbolizes a profile from source. Remote
// profiles are additionally timestamped, saved to a local file under
// PPROF_TMPDIR (unless the user asked for proto output), and their
// first mapping is overridden with exec/buildid when provided.
func grabProfile(source, exec, buildid string, fetch plugin.Fetcher, sym plugin.Symbolizer, obj plugin.ObjTool, ui plugin.UI, f *flags) (*profile.Profile, error) {
	source, host, duration := adjustURL(source, *f.flagSeconds, ui)
	remote := host != ""

	if remote {
		ui.Print("Fetching profile from ", source)
		if duration != 0 {
			ui.Print("Please wait... (" + duration.String() + ")")
		}
	}

	now := time.Now()
	// Fetch profile from source.
	// Give 50% slack on the timeout.
	p, err := fetch(source, duration+duration/2, ui)
	if err != nil {
		return nil, err
	}

	// Update the time/duration if the profile source doesn't include it.
	// TODO(rsilvera): Remove this when we remove support for legacy profiles.
	if remote {
		if p.TimeNanos == 0 {
			p.TimeNanos = now.UnixNano()
		}
		if duration != 0 && p.DurationNanos == 0 {
			p.DurationNanos = int64(duration)
		}
	}

	// Replace executable/buildID with the options provided in the
	// command line. Assume the executable is the first Mapping entry.
	if exec != "" || buildid != "" {
		if len(p.Mapping) == 0 {
			// Create a fake mapping to hold the user option, and associate
			// all samples to it.
			m := &profile.Mapping{
				ID: 1,
			}
			for _, l := range p.Location {
				l.Mapping = m
			}
			p.Mapping = []*profile.Mapping{m}
		}
		if exec != "" {
			p.Mapping[0].File = exec
		}
		if buildid != "" {
			p.Mapping[0].BuildID = buildid
		}
	}

	if err := sym(*f.flagSymbolize, source, p, obj, ui); err != nil {
		return nil, err
	}

	// Save a copy of any remote profiles, unless the user is explicitly
	// saving it.
	if remote && !f.isFormat("proto") {
		// Build a descriptive filename prefix:
		// pprof.<binary>.<host>.<sample types...>
		prefix := "pprof."
		if len(p.Mapping) > 0 && p.Mapping[0].File != "" {
			prefix = prefix + filepath.Base(p.Mapping[0].File) + "."
		}
		if !strings.ContainsRune(host, os.PathSeparator) {
			prefix = prefix + host + "."
		}
		for _, s := range p.SampleType {
			prefix = prefix + s.Type + "."
		}

		dir := os.Getenv("PPROF_TMPDIR")
		tempFile, err := tempfile.New(dir, prefix, ".pb.gz")
		if err == nil {
			if err = p.Write(tempFile); err == nil {
				ui.PrintErr("Saved profile in ", tempFile.Name())
			}
		}
		if err != nil {
			// Saving is best-effort; warn but keep going.
			ui.PrintErr("Could not save profile: ", err)
		}
	}

	// Demangling failures are non-fatal.
	if err := p.Demangle(obj.Demangle); err != nil {
		ui.PrintErr("Failed to demangle profile: ", err)
	}

	if err := p.CheckValid(); err != nil {
		return nil, fmt.Errorf("Grab %s: %v", source, err)
	}

	return p, nil
}
+
// flags holds pointers to the parsed values of every pprof
// command-line option, plus the leftover positional arguments
// (profile sources) and the command table in effect. Pointer fields
// allow processFlags to override values after parsing.
type flags struct {
	flagInteractive *bool              // Accept commands interactively
	flagCommands    map[string]*bool   // pprof commands without parameters
	flagParamCommands map[string]*string // pprof commands with parameters

	flagOutput *string // Output file name

	flagCum      *bool // Sort by cumulative data
	flagCallTree *bool // generate a context-sensitive call tree

	flagAddresses *bool // Report at address level
	flagLines     *bool // Report at source line level
	flagFiles     *bool // Report at file level
	flagFunctions *bool // Report at function level [default]

	flagSymbolize *string // Symbolization options (=none to disable)
	flagBuildID   *string // Override build id for first mapping

	flagNodeCount    *int     // Max number of nodes to show
	flagNodeFraction *float64 // Hide nodes below <f>*total
	flagEdgeFraction *float64 // Hide edges below <f>*total
	flagTrim         *bool    // Set to false to ignore NodeCount/*Fraction
	flagRuntime      *bool    // Show runtime call frames in memory profiles
	flagFocus        *string  // Restricts to paths going through a node matching regexp
	flagIgnore       *string  // Skips paths going through any nodes matching regexp
	flagHide         *string  // Skips sample locations matching regexp
	flagTagFocus     *string  // Restrict to samples tagged with key:value matching regexp
	flagTagIgnore    *string  // Discard samples tagged with key:value matching regexp
	flagDropNegative *bool    // Skip negative values

	flagBase *string // Source for base profile to use for comparison

	flagSeconds *int // Length of time for dynamic profiles

	flagTotalDelay  *bool // Display total delay at each region
	flagContentions *bool // Display number of delays at each region
	flagMeanDelay   *bool // Display mean delay at each region

	flagInUseSpace   *bool    // Display in-use memory size
	flagInUseObjects *bool    // Display in-use object counts
	flagAllocSpace   *bool    // Display allocated memory size
	flagAllocObjects *bool    // Display allocated object counts
	flagDisplayUnit  *string  // Measurement unit to use on reports
	flagDivideBy     *float64 // Ratio to divide sample values

	flagSampleIndex *int  // Sample value to use in reports.
	flagMean        *bool // Use mean of sample_index over count

	flagTools       *string  // Path for object tool pathnames (from -tools / PPROF_TOOLS)
	profileSource   []string // Positional args: profile sources (and optionally binary/build ID)
	profileExecName string   // Executable name detected from the first positional arg

	extraUsage string            // Extra usage text supplied by the FlagSet
	commands   commands.Commands // Command table after applying overrides
}
+
+func (f *flags) isFormat(format string) bool {
+ if fl := f.flagCommands[format]; fl != nil {
+ return *fl
+ }
+ if fl := f.flagParamCommands[format]; fl != nil {
+ return *fl != ""
+ }
+ return false
+}
+
+// String provides a printable representation for the current set of flags.
+func (f *flags) String(p *profile.Profile) string {
+ var ret string
+
+ if ix := *f.flagSampleIndex; ix != -1 {
+ ret += fmt.Sprintf(" %-25s : %d (%s)\n", "sample_index", ix, p.SampleType[ix].Type)
+ }
+ if ix := *f.flagMean; ix {
+ ret += boolFlagString("mean")
+ }
+ if *f.flagDisplayUnit != "minimum" {
+ ret += stringFlagString("unit", *f.flagDisplayUnit)
+ }
+
+ switch {
+ case *f.flagInteractive:
+ ret += boolFlagString("interactive")
+ }
+ for name, fl := range f.flagCommands {
+ if *fl {
+ ret += boolFlagString(name)
+ }
+ }
+
+ if *f.flagCum {
+ ret += boolFlagString("cum")
+ }
+ if *f.flagCallTree {
+ ret += boolFlagString("call_tree")
+ }
+
+ switch {
+ case *f.flagAddresses:
+ ret += boolFlagString("addresses")
+ case *f.flagLines:
+ ret += boolFlagString("lines")
+ case *f.flagFiles:
+ ret += boolFlagString("files")
+ case *f.flagFunctions:
+ ret += boolFlagString("functions")
+ }
+
+ if *f.flagNodeCount != -1 {
+ ret += intFlagString("nodecount", *f.flagNodeCount)
+ }
+
+ ret += floatFlagString("nodefraction", *f.flagNodeFraction)
+ ret += floatFlagString("edgefraction", *f.flagEdgeFraction)
+
+ if *f.flagFocus != "" {
+ ret += stringFlagString("focus", *f.flagFocus)
+ }
+ if *f.flagIgnore != "" {
+ ret += stringFlagString("ignore", *f.flagIgnore)
+ }
+ if *f.flagHide != "" {
+ ret += stringFlagString("hide", *f.flagHide)
+ }
+
+ if *f.flagTagFocus != "" {
+ ret += stringFlagString("tagfocus", *f.flagTagFocus)
+ }
+ if *f.flagTagIgnore != "" {
+ ret += stringFlagString("tagignore", *f.flagTagIgnore)
+ }
+
+ return ret
+}
+
// flagRow formats one " name : value" line of the flag summary, with
// the label left-justified in a 25-column field.
func flagRow(label, value string) string {
	return fmt.Sprintf(" %-25s : %s\n", label, value)
}

// boolFlagString renders a boolean flag that is set.
func boolFlagString(label string) string {
	return flagRow(label, "true")
}

// stringFlagString renders a string-valued flag.
func stringFlagString(label, value string) string {
	return flagRow(label, value)
}

// intFlagString renders an integer-valued flag.
func intFlagString(label string, value int) string {
	return flagRow(label, fmt.Sprintf("%d", value))
}

// floatFlagString renders a float-valued flag.
func floatFlagString(label string, value float64) string {
	return flagRow(label, fmt.Sprintf("%f", value))
}
+
// Utility routines producing pointers to freshly allocated values,
// used when overriding parsed flag values programmatically.

// newBool returns a pointer to a new bool holding b.
func newBool(b bool) *bool {
	p := new(bool)
	*p = b
	return p
}

// newString returns a pointer to a new string holding s.
func newString(s string) *string {
	p := new(string)
	*p = s
	return p
}

// newFloat64 returns a pointer to a new float64 holding fl.
func newFloat64(fl float64) *float64 {
	p := new(float64)
	*p = fl
	return p
}

// newInt returns a pointer to a new int holding i.
func newInt(i int) *int {
	p := new(int)
	*p = i
	return p
}
+
+func (f *flags) usage(ui plugin.UI) {
+ var commandMsg []string
+ for name, cmd := range f.commands {
+ if cmd.HasParam {
+ name = name + "=p"
+ }
+ commandMsg = append(commandMsg,
+ fmt.Sprintf(" -%-16s %s", name, cmd.Usage))
+ }
+
+ sort.Strings(commandMsg)
+
+ text := usageMsgHdr + strings.Join(commandMsg, "\n") + "\n" + usageMsg + "\n"
+ if f.extraUsage != "" {
+ text += f.extraUsage + "\n"
+ }
+ text += usageMsgVars
+ ui.Print(text)
+}
+
// getFlags registers all pprof flags on the provided FlagSet, installs
// the report-format commands (with overrides applied), parses the
// command line, and returns the resulting flag state. It fails if no
// profile source was given. Side effects: may set
// profile.LegacyHeapAllocated and the PPROF_TMPDIR environment
// variable.
func getFlags(flag plugin.FlagSet, overrides commands.Commands, ui plugin.UI) (*flags, error) {
	f := &flags{
		flagInteractive:   flag.Bool("interactive", false, "Accepts commands interactively"),
		flagCommands:      make(map[string]*bool),
		flagParamCommands: make(map[string]*string),

		// Filename for file-based output formats, stdout by default.
		flagOutput: flag.String("output", "", "Output filename for file-based outputs "),
		// Comparisons.
		flagBase:         flag.String("base", "", "Source for base profile for comparison"),
		flagDropNegative: flag.Bool("drop_negative", false, "Ignore negative differences"),

		// Data sorting criteria.
		flagCum: flag.Bool("cum", false, "Sort by cumulative data"),
		// Graph handling options.
		flagCallTree: flag.Bool("call_tree", false, "Create a context-sensitive call tree"),
		// Granularity of output resolution.
		flagAddresses: flag.Bool("addresses", false, "Report at address level"),
		flagLines:     flag.Bool("lines", false, "Report at source line level"),
		flagFiles:     flag.Bool("files", false, "Report at source file level"),
		flagFunctions: flag.Bool("functions", false, "Report at function level [default]"),
		// Internal options.
		flagSymbolize: flag.String("symbolize", "", "Options for profile symbolization"),
		flagBuildID:   flag.String("buildid", "", "Override build id for first mapping"),
		// Filtering options
		flagNodeCount:    flag.Int("nodecount", -1, "Max number of nodes to show"),
		flagNodeFraction: flag.Float64("nodefraction", 0.005, "Hide nodes below <f>*total"),
		flagEdgeFraction: flag.Float64("edgefraction", 0.001, "Hide edges below <f>*total"),
		flagTrim:         flag.Bool("trim", true, "Honor nodefraction/edgefraction/nodecount defaults"),
		flagRuntime:      flag.Bool("runtime", false, "Show runtime call frames in memory profiles"),
		flagFocus:        flag.String("focus", "", "Restricts to paths going through a node matching regexp"),
		flagIgnore:       flag.String("ignore", "", "Skips paths going through any nodes matching regexp"),
		flagHide:         flag.String("hide", "", "Skips nodes matching regexp"),
		flagTagFocus:     flag.String("tagfocus", "", "Restrict to samples with tags in range or matched by regexp"),
		flagTagIgnore:    flag.String("tagignore", "", "Discard samples with tags in range or matched by regexp"),
		// CPU profile options
		flagSeconds: flag.Int("seconds", -1, "Length of time for dynamic profiles"),
		// Heap profile options
		flagInUseSpace:   flag.Bool("inuse_space", false, "Display in-use memory size"),
		flagInUseObjects: flag.Bool("inuse_objects", false, "Display in-use object counts"),
		flagAllocSpace:   flag.Bool("alloc_space", false, "Display allocated memory size"),
		flagAllocObjects: flag.Bool("alloc_objects", false, "Display allocated object counts"),
		flagDisplayUnit:  flag.String("unit", "minimum", "Measurement units to display"),
		flagDivideBy:     flag.Float64("divide_by", 1.0, "Ratio to divide all samples before visualization"),
		flagSampleIndex:  flag.Int("sample_index", -1, "Index of sample value to report"),
		flagMean:         flag.Bool("mean", false, "Average sample value over first value (count)"),
		// Contention profile options
		flagTotalDelay:  flag.Bool("total_delay", false, "Display total delay at each region"),
		flagContentions: flag.Bool("contentions", false, "Display number of delays at each region"),
		flagMeanDelay:   flag.Bool("mean_delay", false, "Display mean delay at each region"),
		flagTools:       flag.String("tools", os.Getenv("PPROF_TOOLS"), "Path for object tool pathnames"),
		extraUsage:      flag.ExtraUsage(),
	}

	// Flags used during command processing. Passing a **bool lets the
	// command table observe later replacement of flagInteractive.
	interactive := &f.flagInteractive
	f.commands = commands.PProf(functionCompleter, interactive)

	// Override commands
	for name, cmd := range overrides {
		f.commands[name] = cmd
	}

	// Register one flag per report format: boolean for plain commands,
	// string for commands that take a regexp parameter.
	for name, cmd := range f.commands {
		if cmd.HasParam {
			f.flagParamCommands[name] = flag.String(name, "", "Generate a report in "+name+" format, matching regexp")
		} else {
			f.flagCommands[name] = flag.Bool(name, false, "Generate a report in "+name+" format")
		}
	}

	args := flag.Parse(func() { f.usage(ui) })
	if len(args) == 0 {
		return nil, fmt.Errorf("no profile source specified")
	}

	f.profileSource = args

	// Instruct legacy heapz parsers to grab historical allocation data,
	// instead of the default in-use data. Not available with tcmalloc.
	if *f.flagAllocSpace || *f.flagAllocObjects {
		profile.LegacyHeapAllocated = true
	}

	// Default PPROF_TMPDIR to $HOME/pprof, creating it if needed.
	if profileDir := os.Getenv("PPROF_TMPDIR"); profileDir == "" {
		profileDir = os.Getenv("HOME") + "/pprof"
		os.Setenv("PPROF_TMPDIR", profileDir)
		if err := os.MkdirAll(profileDir, 0755); err != nil {
			return nil, fmt.Errorf("failed to access temp dir %s: %v", profileDir, err)
		}
	}

	return f, nil
}
+
+func processFlags(p *profile.Profile, ui plugin.UI, f *flags) error {
+ flagDis := f.isFormat("disasm")
+ flagPeek := f.isFormat("peek")
+ flagWebList := f.isFormat("weblist")
+ flagList := f.isFormat("list")
+ flagCallgrind := f.isFormat("callgrind")
+
+ if flagDis || flagWebList || flagCallgrind {
+ // Collect all samples at address granularity for assembly
+ // listing.
+ f.flagNodeCount = newInt(0)
+ f.flagAddresses = newBool(true)
+ f.flagLines = newBool(false)
+ f.flagFiles = newBool(false)
+ f.flagFunctions = newBool(false)
+ }
+
+ if flagPeek {
+ // Collect all samples at function granularity for peek command
+ f.flagNodeCount = newInt(0)
+ f.flagAddresses = newBool(false)
+ f.flagLines = newBool(false)
+ f.flagFiles = newBool(false)
+ f.flagFunctions = newBool(true)
+ }
+
+ if flagList {
+ // Collect all samples at fileline granularity for source
+ // listing.
+ f.flagNodeCount = newInt(0)
+ f.flagAddresses = newBool(false)
+ f.flagLines = newBool(true)
+ f.flagFiles = newBool(false)
+ f.flagFunctions = newBool(false)
+ }
+
+ if !*f.flagTrim {
+ f.flagNodeCount = newInt(0)
+ f.flagNodeFraction = newFloat64(0)
+ f.flagEdgeFraction = newFloat64(0)
+ }
+
+ if oc := countFlagMap(f.flagCommands, f.flagParamCommands); oc == 0 {
+ f.flagInteractive = newBool(true)
+ } else if oc > 1 {
+ f.usage(ui)
+ return fmt.Errorf("must set at most one output format")
+ }
+
+ // Apply nodecount defaults for non-interactive mode. The
+ // interactive shell will apply defaults for the interactive mode.
+ if *f.flagNodeCount < 0 && !*f.flagInteractive {
+ switch {
+ default:
+ f.flagNodeCount = newInt(80)
+ case f.isFormat("text"):
+ f.flagNodeCount = newInt(0)
+ }
+ }
+
+ // Apply legacy options and diagnose conflicts.
+ if rc := countFlags([]*bool{f.flagAddresses, f.flagLines, f.flagFiles, f.flagFunctions}); rc == 0 {
+ f.flagFunctions = newBool(true)
+ } else if rc > 1 {
+ f.usage(ui)
+ return fmt.Errorf("must set at most one granularity option")
+ }
+
+ var err error
+ si, sm := *f.flagSampleIndex, *f.flagMean || *f.flagMeanDelay
+ si, err = sampleIndex(p, &f.flagTotalDelay, si, 1, "delay", "-total_delay", err)
+ si, err = sampleIndex(p, &f.flagMeanDelay, si, 1, "delay", "-mean_delay", err)
+ si, err = sampleIndex(p, &f.flagContentions, si, 0, "contentions", "-contentions", err)
+
+ si, err = sampleIndex(p, &f.flagInUseSpace, si, 1, "inuse_space", "-inuse_space", err)
+ si, err = sampleIndex(p, &f.flagInUseObjects, si, 0, "inuse_objects", "-inuse_objects", err)
+ si, err = sampleIndex(p, &f.flagAllocSpace, si, 1, "alloc_space", "-alloc_space", err)
+ si, err = sampleIndex(p, &f.flagAllocObjects, si, 0, "alloc_objects", "-alloc_objects", err)
+
+ if si == -1 {
+ // Use last value if none is requested.
+ si = len(p.SampleType) - 1
+ } else if si < 0 || si >= len(p.SampleType) {
+ err = fmt.Errorf("sample_index value %d out of range [0..%d]", si, len(p.SampleType)-1)
+ }
+
+ if err != nil {
+ f.usage(ui)
+ return err
+ }
+ f.flagSampleIndex, f.flagMean = newInt(si), newBool(sm)
+ return nil
+}
+
+func sampleIndex(p *profile.Profile, flag **bool,
+ sampleIndex int,
+ newSampleIndex int,
+ sampleType, option string,
+ err error) (int, error) {
+ if err != nil || !**flag {
+ return sampleIndex, err
+ }
+ *flag = newBool(false)
+ if sampleIndex != -1 {
+ return 0, fmt.Errorf("set at most one sample value selection option")
+ }
+ if newSampleIndex >= len(p.SampleType) ||
+ p.SampleType[newSampleIndex].Type != sampleType {
+ return 0, fmt.Errorf("option %s not valid for this profile", option)
+ }
+ return newSampleIndex, nil
+}
+
+func countFlags(bs []*bool) int {
+ var c int
+ for _, b := range bs {
+ if *b {
+ c++
+ }
+ }
+ return c
+}
+
+func countFlagMap(bms map[string]*bool, bmrxs map[string]*string) int {
+ var c int
+ for _, b := range bms {
+ if *b {
+ c++
+ }
+ }
+ for _, s := range bmrxs {
+ if *s != "" {
+ c++
+ }
+ }
+ return c
+}
+
+var usageMsgHdr = "usage: pprof [options] [binary] <profile source> ...\n" +
+ "Output format (only set one):\n"
+
+var usageMsg = "Output file parameters (for file-based output formats):\n" +
+ " -output=f Generate output on file f (stdout by default)\n" +
+ "Output granularity (only set one):\n" +
+ " -functions Report at function level [default]\n" +
+ " -files Report at source file level\n" +
+ " -lines Report at source line level\n" +
+ " -addresses Report at address level\n" +
+ "Comparison options:\n" +
+ " -base <profile> Show delta from this profile\n" +
+ " -drop_negative Ignore negative differences\n" +
+ "Sorting options:\n" +
+ " -cum Sort by cumulative data\n\n" +
+ "Dynamic profile options:\n" +
+ " -seconds=N Length of time for dynamic profiles\n" +
+ "Profile trimming options:\n" +
+ " -nodecount=N Max number of nodes to show\n" +
+ " -nodefraction=f Hide nodes below <f>*total\n" +
+ " -edgefraction=f Hide edges below <f>*total\n" +
+ "Sample value selection option (by index):\n" +
+ " -sample_index Index of sample value to display\n" +
+ " -mean Average sample value over first value\n" +
+ "Sample value selection option (for heap profiles):\n" +
+ " -inuse_space Display in-use memory size\n" +
+ " -inuse_objects Display in-use object counts\n" +
+ " -alloc_space Display allocated memory size\n" +
+ " -alloc_objects Display allocated object counts\n" +
+ "Sample value selection option (for contention profiles):\n" +
+ " -total_delay Display total delay at each region\n" +
+ " -contentions Display number of delays at each region\n" +
+ " -mean_delay Display mean delay at each region\n" +
+ "Filtering options:\n" +
+ " -runtime Show runtime call frames in memory profiles\n" +
+ " -focus=r Restricts to paths going through a node matching regexp\n" +
+ " -ignore=r Skips paths going through any nodes matching regexp\n" +
+ " -tagfocus=r Restrict to samples tagged with key:value matching regexp\n" +
+ " Restrict to samples with numeric tags in range (eg \"32kb:1mb\")\n" +
+ " -tagignore=r Discard samples tagged with key:value matching regexp\n" +
+ " Avoid samples with numeric tags in range (eg \"1mb:\")\n" +
+ "Miscellaneous:\n" +
+ " -call_tree Generate a context-sensitive call tree\n" +
+ " -unit=u Convert all samples to unit u for display\n" +
+ " -divide_by=f Scale all samples by dividing them by f\n" +
+ " -buildid=id Override build id for main binary in profile\n" +
+ " -tools=path Search path for object-level tools\n" +
+ " -help This message"
+
+var usageMsgVars = "Environment Variables:\n" +
+ " PPROF_TMPDIR Location for saved profiles (default $HOME/pprof)\n" +
+ " PPROF_TOOLS Search path for object-level tools\n" +
+ " PPROF_BINARY_PATH Search path for local binary files\n" +
+ " default: $HOME/pprof/binaries\n" +
+ " finds binaries by $name and $buildid/$name"
+
+func aggregate(prof *profile.Profile, f *flags) error {
+ switch {
+ case f.isFormat("proto"), f.isFormat("raw"):
+ // No aggregation for raw profiles.
+ case *f.flagLines:
+ return prof.Aggregate(true, true, true, true, false)
+ case *f.flagFiles:
+ return prof.Aggregate(true, false, true, false, false)
+ case *f.flagFunctions:
+ return prof.Aggregate(true, true, false, false, false)
+ case f.isFormat("weblist"), f.isFormat("disasm"), f.isFormat("callgrind"):
+ return prof.Aggregate(false, true, true, true, true)
+ }
+ return nil
+}
+
+// parseOptions parses the options into report.Options
+// Returns a function to postprocess the report after generation.
+func parseOptions(f *flags) (o *report.Options, p commands.PostProcessor, err error) {
+
+ if *f.flagDivideBy == 0 {
+ return nil, nil, fmt.Errorf("zero divisor specified")
+ }
+
+ o = &report.Options{
+ CumSort: *f.flagCum,
+ CallTree: *f.flagCallTree,
+ PrintAddresses: *f.flagAddresses,
+ DropNegative: *f.flagDropNegative,
+ Ratio: 1 / *f.flagDivideBy,
+
+ NodeCount: *f.flagNodeCount,
+ NodeFraction: *f.flagNodeFraction,
+ EdgeFraction: *f.flagEdgeFraction,
+ OutputUnit: *f.flagDisplayUnit,
+ }
+
+ for cmd, b := range f.flagCommands {
+ if *b {
+ pcmd := f.commands[cmd]
+ o.OutputFormat = pcmd.Format
+ return o, pcmd.PostProcess, nil
+ }
+ }
+
+ for cmd, rx := range f.flagParamCommands {
+ if *rx != "" {
+ pcmd := f.commands[cmd]
+ if o.Symbol, err = regexp.Compile(*rx); err != nil {
+ return nil, nil, fmt.Errorf("parsing -%s regexp: %v", cmd, err)
+ }
+ o.OutputFormat = pcmd.Format
+ return o, pcmd.PostProcess, nil
+ }
+ }
+
+ return nil, nil, fmt.Errorf("no output format selected")
+}
+
+type sampleValueFunc func(*profile.Sample) int64
+
+// sampleFormat returns a function to extract values out of a profile.Sample,
+// and the type/units of those values.
+func sampleFormat(p *profile.Profile, f *flags) (sampleValueFunc, string, string) {
+ valueIndex := *f.flagSampleIndex
+
+ if *f.flagMean {
+ return meanExtractor(valueIndex), "mean_" + p.SampleType[valueIndex].Type, p.SampleType[valueIndex].Unit
+ }
+
+ return valueExtractor(valueIndex), p.SampleType[valueIndex].Type, p.SampleType[valueIndex].Unit
+}
+
+func valueExtractor(ix int) sampleValueFunc {
+ return func(s *profile.Sample) int64 {
+ return s.Value[ix]
+ }
+}
+
+func meanExtractor(ix int) sampleValueFunc {
+ return func(s *profile.Sample) int64 {
+ if s.Value[0] == 0 {
+ return 0
+ }
+ return s.Value[ix] / s.Value[0]
+ }
+}
+
+func generate(interactive bool, prof *profile.Profile, obj plugin.ObjTool, ui plugin.UI, f *flags) error {
+ o, postProcess, err := parseOptions(f)
+ if err != nil {
+ return err
+ }
+
+ var w io.Writer
+ if *f.flagOutput == "" {
+ w = os.Stdout
+ } else {
+ ui.PrintErr("Generating report in ", *f.flagOutput)
+ outputFile, err := os.Create(*f.flagOutput)
+ if err != nil {
+ return err
+ }
+ defer outputFile.Close()
+ w = outputFile
+ }
+
+ if prof.Empty() {
+ return fmt.Errorf("profile is empty")
+ }
+
+ value, stype, unit := sampleFormat(prof, f)
+ o.SampleType = stype
+ rpt := report.New(prof, *o, value, unit)
+
+ // Do not apply filters if we're just generating a proto, so we
+ // still have all the data.
+ if o.OutputFormat != report.Proto {
+ // Delay applying focus/ignore until after creating the report so
+ // the report reflects the total number of samples.
+ if err := preprocess(prof, ui, f); err != nil {
+ return err
+ }
+ }
+
+ if postProcess == nil {
+ return report.Generate(w, rpt, obj)
+ }
+
+ var dot bytes.Buffer
+ if err = report.Generate(&dot, rpt, obj); err != nil {
+ return err
+ }
+
+ return postProcess(&dot, w, ui)
+}
diff --git a/src/cmd/pprof/internal/driver/interactive.go b/src/cmd/pprof/internal/driver/interactive.go
new file mode 100644
index 0000000000..1fa07a2cd7
--- /dev/null
+++ b/src/cmd/pprof/internal/driver/interactive.go
@@ -0,0 +1,492 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package driver
+
+import (
+ "fmt"
+ "io"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+
+ "cmd/pprof/internal/commands"
+ "cmd/pprof/internal/plugin"
+ "internal/pprof/profile"
+)
+
+var profileFunctionNames = []string{}
+
+// functionCompleter replaces provided substring with a function
+// name retrieved from a profile if a single match exists. Otherwise,
+// it returns the substring unchanged. It defaults to a no-op if the profile
+// is not specified.
+func functionCompleter(substring string) string {
+ found := ""
+ for _, fName := range profileFunctionNames {
+ if strings.Contains(fName, substring) {
+ if found != "" {
+ return substring
+ }
+ found = fName
+ }
+ }
+ if found != "" {
+ return found
+ }
+ return substring
+}
+
+// updateAutoComplete enhances autocompletion with information that can be
+// retrieved from the profile
+func updateAutoComplete(p *profile.Profile) {
+ profileFunctionNames = nil // remove function names retrieved previously
+ for _, fn := range p.Function {
+ profileFunctionNames = append(profileFunctionNames, fn.Name)
+ }
+}
+
+// splitCommand splits the command line input into tokens separated by
+// spaces. Takes care to separate commands of the form 'top10' into
+// two tokens: 'top' and '10'
+func splitCommand(input string) []string {
+ fields := strings.Fields(input)
+ if num := strings.IndexAny(fields[0], "0123456789"); num != -1 {
+ inputNumber := fields[0][num:]
+ fields[0] = fields[0][:num]
+ fields = append([]string{fields[0], inputNumber}, fields[1:]...)
+ }
+ return fields
+}
+
+// interactive displays a prompt and reads commands for profile
+// manipulation/visualization.
+func interactive(p *profile.Profile, obj plugin.ObjTool, ui plugin.UI, f *flags) error {
+ updateAutoComplete(p)
+
+ // Enter command processing loop.
+ ui.Print("Entering interactive mode (type \"help\" for commands)")
+ ui.SetAutoComplete(commands.NewCompleter(f.commands))
+
+ for {
+ input, err := readCommand(p, ui, f)
+ if err != nil {
+ if err != io.EOF {
+ return err
+ }
+ if input == "" {
+ return nil
+ }
+ }
+ // Process simple commands.
+ switch input {
+ case "":
+ continue
+ case ":":
+ f.flagFocus = newString("")
+ f.flagIgnore = newString("")
+ f.flagTagFocus = newString("")
+ f.flagTagIgnore = newString("")
+ f.flagHide = newString("")
+ continue
+ }
+
+ fields := splitCommand(input)
+ // Process report generation commands.
+ if _, ok := f.commands[fields[0]]; ok {
+ if err := generateReport(p, fields, obj, ui, f); err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ ui.PrintErr(err)
+ }
+ continue
+ }
+
+ switch cmd := fields[0]; cmd {
+ case "help":
+ commandHelp(fields, ui, f)
+ continue
+ case "exit", "quit":
+ return nil
+ }
+
+ // Process option settings.
+ if of, err := optFlags(p, input, f); err == nil {
+ f = of
+ } else {
+ ui.PrintErr("Error: ", err.Error())
+ }
+ }
+}
+
+func generateReport(p *profile.Profile, cmd []string, obj plugin.ObjTool, ui plugin.UI, f *flags) error {
+ prof := p.Copy()
+
+ cf, err := cmdFlags(prof, cmd, ui, f)
+ if err != nil {
+ return err
+ }
+
+ return generate(true, prof, obj, ui, cf)
+}
+
+// validateRegex checks if a string is a valid regular expression.
+func validateRegex(v string) error {
+ _, err := regexp.Compile(v)
+ return err
+}
+
+// readCommand prompts for and reads the next command.
+func readCommand(p *profile.Profile, ui plugin.UI, f *flags) (string, error) {
+ //ui.Print("Options:\n", f.String(p))
+ s, err := ui.ReadLine()
+ return strings.TrimSpace(s), err
+}
+
+func commandHelp(_ []string, ui plugin.UI, f *flags) error {
+ help := `
+ Commands:
+ cmd [n] [--cum] [focus_regex]* [-ignore_regex]*
+ Produce a text report with the top n entries.
+ Include samples matching focus_regex, and exclude ignore_regex.
+ Add --cum to sort using cumulative data.
+ Available commands:
+`
+ var commands []string
+ for name, cmd := range f.commands {
+ commands = append(commands, fmt.Sprintf(" %-12s %s", name, cmd.Usage))
+ }
+ sort.Strings(commands)
+
+ help = help + strings.Join(commands, "\n") + `
+ peek func_regex
+ Display callers and callees of functions matching func_regex.
+
+ dot [n] [focus_regex]* [-ignore_regex]* [>file]
+ Produce an annotated callgraph with the top n entries.
+ Include samples matching focus_regex, and exclude ignore_regex.
+ For other outputs, replace dot with:
+ - Graphic formats: dot, svg, pdf, ps, gif, png (use > to name output file)
+ - Graph viewer: gv, web, evince, eog
+
+ callgrind [n] [focus_regex]* [-ignore_regex]* [>file]
+ Produce a file in callgrind-compatible format.
+ Include samples matching focus_regex, and exclude ignore_regex.
+
+ weblist func_regex [-ignore_regex]*
+ Show annotated source with interspersed assembly in a web browser.
+
+ list func_regex [-ignore_regex]*
+ Print source for routines matching func_regex, and exclude ignore_regex.
+
+ disasm func_regex [-ignore_regex]*
+ Disassemble routines matching func_regex, and exclude ignore_regex.
+
+ tags tag_regex [-ignore_regex]*
+ List tags with key:value matching tag_regex and exclude ignore_regex.
+
+ quit/exit/^D
+ Exit pprof.
+
+ option=value
+ The following options can be set individually:
+ cum/flat: Sort entries based on cumulative or flat data
+ call_tree: Build context-sensitive call trees
+ nodecount: Max number of entries to display
+ nodefraction: Min frequency ratio of nodes to display
+ edgefraction: Min frequency ratio of edges to display
+ focus/ignore: Regexp to include/exclude samples by name/file
+ tagfocus/tagignore: Regexp or value range to filter samples by tag
+ eg "1mb", "1mb:2mb", ":64kb"
+
+ functions: Level of aggregation for sample data
+ files:
+ lines:
+ addresses:
+
+ unit: Measurement unit to use on reports
+
+ Sample value selection by index:
+ sample_index: Index of sample value to display
+ mean: Average sample value over first value
+
+ Sample value selection by name:
+ alloc_space for heap profiles
+ alloc_objects
+ inuse_space
+ inuse_objects
+
+ total_delay for contention profiles
+ mean_delay
+ contentions
+
+ : Clear focus/ignore/hide/tagfocus/tagignore`
+
+ ui.Print(help)
+ return nil
+}
+
+// cmdFlags parses the options of an interactive command and returns
+// an updated flags object.
+func cmdFlags(prof *profile.Profile, input []string, ui plugin.UI, f *flags) (*flags, error) {
+ cf := *f
+
+ var focus, ignore string
+ output := *cf.flagOutput
+ nodeCount := *cf.flagNodeCount
+ cmd := input[0]
+
+ // Update output flags based on parameters.
+ tokens := input[1:]
+ for p := 0; p < len(tokens); p++ {
+ t := tokens[p]
+ if t == "" {
+ continue
+ }
+ if c, err := strconv.ParseInt(t, 10, 32); err == nil {
+ nodeCount = int(c)
+ continue
+ }
+ switch t[0] {
+ case '>':
+ if len(t) > 1 {
+ output = t[1:]
+ continue
+ }
+ // find next token
+ for p++; p < len(tokens); p++ {
+ if tokens[p] != "" {
+ output = tokens[p]
+ break
+ }
+ }
+ case '-':
+ if t == "--cum" || t == "-cum" {
+ cf.flagCum = newBool(true)
+ continue
+ }
+ ignore = catRegex(ignore, t[1:])
+ default:
+ focus = catRegex(focus, t)
+ }
+ }
+
+ pcmd, ok := f.commands[cmd]
+ if !ok {
+ return nil, fmt.Errorf("Unexpected parse failure: %v", input)
+ }
+ // Reset flags
+ cf.flagCommands = make(map[string]*bool)
+ cf.flagParamCommands = make(map[string]*string)
+
+ if !pcmd.HasParam {
+ cf.flagCommands[cmd] = newBool(true)
+
+ switch cmd {
+ case "tags":
+ cf.flagTagFocus = newString(focus)
+ cf.flagTagIgnore = newString(ignore)
+ default:
+ cf.flagFocus = newString(catRegex(*cf.flagFocus, focus))
+ cf.flagIgnore = newString(catRegex(*cf.flagIgnore, ignore))
+ }
+ } else {
+ if focus == "" {
+ focus = "."
+ }
+ cf.flagParamCommands[cmd] = newString(focus)
+ cf.flagIgnore = newString(catRegex(*cf.flagIgnore, ignore))
+ }
+
+ if nodeCount < 0 {
+ switch cmd {
+ case "text", "top":
+ // Default text/top to 10 nodes on interactive mode
+ nodeCount = 10
+ default:
+ nodeCount = 80
+ }
+ }
+
+ cf.flagNodeCount = newInt(nodeCount)
+ cf.flagOutput = newString(output)
+
+ // Do regular flags processing
+ if err := processFlags(prof, ui, &cf); err != nil {
+ cf.usage(ui)
+ return nil, err
+ }
+
+ return &cf, nil
+}
+
+func catRegex(a, b string) string {
+ if a == "" {
+ return b
+ }
+ if b == "" {
+ return a
+ }
+ return a + "|" + b
+}
+
+// optFlags parses an interactive option setting and returns
+// an updated flags object.
+func optFlags(p *profile.Profile, input string, f *flags) (*flags, error) {
+ inputs := strings.SplitN(input, "=", 2)
+ option := strings.ToLower(strings.TrimSpace(inputs[0]))
+ var value string
+ if len(inputs) == 2 {
+ value = strings.TrimSpace(inputs[1])
+ }
+
+ of := *f
+
+ var err error
+ var bv bool
+ var uv uint64
+ var fv float64
+
+ switch option {
+ case "cum":
+ if bv, err = parseBool(value); err != nil {
+ return nil, err
+ }
+ of.flagCum = newBool(bv)
+ case "flat":
+ if bv, err = parseBool(value); err != nil {
+ return nil, err
+ }
+ of.flagCum = newBool(!bv)
+ case "call_tree":
+ if bv, err = parseBool(value); err != nil {
+ return nil, err
+ }
+ of.flagCallTree = newBool(bv)
+ case "unit":
+ of.flagDisplayUnit = newString(value)
+ case "sample_index":
+ if uv, err = strconv.ParseUint(value, 10, 32); err != nil {
+ return nil, err
+ }
+ if ix := int(uv); ix < 0 || ix >= len(p.SampleType) {
+ return nil, fmt.Errorf("sample_index out of range [0..%d]", len(p.SampleType)-1)
+ }
+ of.flagSampleIndex = newInt(int(uv))
+ case "mean":
+ if bv, err = parseBool(value); err != nil {
+ return nil, err
+ }
+ of.flagMean = newBool(bv)
+ case "nodecount":
+ if uv, err = strconv.ParseUint(value, 10, 32); err != nil {
+ return nil, err
+ }
+ of.flagNodeCount = newInt(int(uv))
+ case "nodefraction":
+ if fv, err = strconv.ParseFloat(value, 64); err != nil {
+ return nil, err
+ }
+ of.flagNodeFraction = newFloat64(fv)
+ case "edgefraction":
+ if fv, err = strconv.ParseFloat(value, 64); err != nil {
+ return nil, err
+ }
+ of.flagEdgeFraction = newFloat64(fv)
+ case "focus":
+ if err = validateRegex(value); err != nil {
+ return nil, err
+ }
+ of.flagFocus = newString(value)
+ case "ignore":
+ if err = validateRegex(value); err != nil {
+ return nil, err
+ }
+ of.flagIgnore = newString(value)
+ case "tagfocus":
+ if err = validateRegex(value); err != nil {
+ return nil, err
+ }
+ of.flagTagFocus = newString(value)
+ case "tagignore":
+ if err = validateRegex(value); err != nil {
+ return nil, err
+ }
+ of.flagTagIgnore = newString(value)
+ case "hide":
+ if err = validateRegex(value); err != nil {
+ return nil, err
+ }
+ of.flagHide = newString(value)
+ case "addresses", "files", "lines", "functions":
+ if bv, err = parseBool(value); err != nil {
+ return nil, err
+ }
+ if !bv {
+ return nil, fmt.Errorf("select one of addresses/files/lines/functions")
+ }
+ setGranularityToggle(option, &of)
+ default:
+ if ix := findSampleIndex(p, "", option); ix >= 0 {
+ of.flagSampleIndex = newInt(ix)
+ } else if ix := findSampleIndex(p, "total_", option); ix >= 0 {
+ of.flagSampleIndex = newInt(ix)
+ of.flagMean = newBool(false)
+ } else if ix := findSampleIndex(p, "mean_", option); ix >= 1 {
+ of.flagSampleIndex = newInt(ix)
+ of.flagMean = newBool(true)
+ } else {
+ return nil, fmt.Errorf("unrecognized command: %s", input)
+ }
+ }
+ return &of, nil
+}
+
+// parseBool parses a string as a boolean value.
+func parseBool(v string) (bool, error) {
+ switch strings.ToLower(v) {
+ case "true", "t", "yes", "y", "1", "":
+ return true, nil
+ case "false", "f", "no", "n", "0":
+ return false, nil
+ }
+ return false, fmt.Errorf(`illegal input "%s" for bool value`, v)
+}
+
+func findSampleIndex(p *profile.Profile, prefix, sampleType string) int {
+ if !strings.HasPrefix(sampleType, prefix) {
+ return -1
+ }
+ sampleType = strings.TrimPrefix(sampleType, prefix)
+ for i, r := range p.SampleType {
+ if r.Type == sampleType {
+ return i
+ }
+ }
+ return -1
+}
+
+// setGranularityToggle manages the set of granularity options. These
+// operate as a toggle; turning one on turns the others off.
+func setGranularityToggle(o string, fl *flags) {
+ t, f := newBool(true), newBool(false)
+ fl.flagFunctions = f
+ fl.flagFiles = f
+ fl.flagLines = f
+ fl.flagAddresses = f
+ switch o {
+ case "functions":
+ fl.flagFunctions = t
+ case "files":
+ fl.flagFiles = t
+ case "lines":
+ fl.flagLines = t
+ case "addresses":
+ fl.flagAddresses = t
+ default:
+ panic(fmt.Errorf("unexpected option %s", o))
+ }
+}
diff --git a/src/cmd/pprof/internal/fetch/fetch.go b/src/cmd/pprof/internal/fetch/fetch.go
new file mode 100644
index 0000000000..2e2de575f8
--- /dev/null
+++ b/src/cmd/pprof/internal/fetch/fetch.go
@@ -0,0 +1,82 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fetch provides an extensible mechanism to fetch a profile
+// from a data source.
+package fetch
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "time"
+
+ "cmd/pprof/internal/plugin"
+ "internal/pprof/profile"
+)
+
+// FetchProfile reads from a data source (network, file) and generates a
+// profile.
+func FetchProfile(source string, timeout time.Duration) (*profile.Profile, error) {
+ return Fetcher(source, timeout, plugin.StandardUI())
+}
+
+// Fetcher is the plugin.Fetcher version of FetchProfile.
+func Fetcher(source string, timeout time.Duration, ui plugin.UI) (*profile.Profile, error) {
+ var f io.ReadCloser
+ var err error
+
+ url, err := url.Parse(source)
+ if err == nil && url.Host != "" {
+ f, err = FetchURL(source, timeout)
+ } else {
+ f, err = os.Open(source)
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return profile.Parse(f)
+}
+
+// FetchURL fetches a profile from a URL using HTTP.
+func FetchURL(source string, timeout time.Duration) (io.ReadCloser, error) {
+ resp, err := httpGet(source, timeout)
+ if err != nil {
+ return nil, fmt.Errorf("http fetch %s: %v", source, err)
+ }
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("server response: %s", resp.Status)
+ }
+
+ return resp.Body, nil
+}
+
+// PostURL issues a POST to a URL over HTTP.
+func PostURL(source, post string) ([]byte, error) {
+ resp, err := http.Post(source, "application/octet-stream", strings.NewReader(post))
+ if err != nil {
+ return nil, fmt.Errorf("http post %s: %v", source, err)
+ }
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("server response: %s", resp.Status)
+ }
+ defer resp.Body.Close()
+ return ioutil.ReadAll(resp.Body)
+}
+
+// httpGet is a wrapper around http.Get; it is defined as a variable
+// so it can be redefined during testing.
+var httpGet = func(url string, timeout time.Duration) (*http.Response, error) {
+ client := &http.Client{
+ Transport: &http.Transport{
+ ResponseHeaderTimeout: timeout + 5*time.Second,
+ },
+ }
+ return client.Get(url)
+}
diff --git a/src/cmd/pprof/internal/plugin/plugin.go b/src/cmd/pprof/internal/plugin/plugin.go
new file mode 100644
index 0000000000..ff1e8adfaf
--- /dev/null
+++ b/src/cmd/pprof/internal/plugin/plugin.go
@@ -0,0 +1,213 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package plugin defines the plugin implementations that the main pprof driver requires.
+package plugin
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "regexp"
+ "strings"
+ "time"
+
+ "internal/pprof/profile"
+)
+
+// A FlagSet creates and parses command-line flags.
+// It is similar to the standard flag.FlagSet.
+type FlagSet interface {
+ // Bool, Int, Float64, and String define new flags,
+ // like the functions of the same name in package flag.
+ Bool(name string, def bool, usage string) *bool
+ Int(name string, def int, usage string) *int
+ Float64(name string, def float64, usage string) *float64
+ String(name string, def string, usage string) *string
+
+ // ExtraUsage returns any additional text that should be
+ // printed after the standard usage message.
+ // The typical use of ExtraUsage is to show any custom flags
+ // defined by the specific pprof plugins being used.
+ ExtraUsage() string
+
+ // Parse initializes the flags with their values for this run
+ // and returns the non-flag command line arguments.
+ // If an unknown flag is encountered or there are no arguments,
+ // Parse should call usage and return nil.
+ Parse(usage func()) []string
+}
+
+// An ObjTool inspects shared libraries and executable files.
+type ObjTool interface {
+ // Open opens the named object file.
+ // If the object is a shared library, start is the address where
+ // it is mapped into memory in the address space being inspected.
+ Open(file string, start uint64) (ObjFile, error)
+
+ // Demangle translates a batch of symbol names from mangled
+ // form to human-readable form.
+ Demangle(names []string) (map[string]string, error)
+
+ // Disasm disassembles the named object file, starting at
+ // the start address and stopping at (before) the end address.
+ Disasm(file string, start, end uint64) ([]Inst, error)
+
+ // SetConfig configures the tool.
+ // The implementation defines the meaning of the string
+ // and can ignore it entirely.
+ SetConfig(config string)
+}
+
+// NoObjTool returns a trivial implementation of the ObjTool interface.
+// Open returns an error indicating that the requested file does not exist.
+// Demangle returns an empty map and a nil error.
+// Disasm returns an error.
+// SetConfig is a no-op.
+func NoObjTool() ObjTool {
+ return noObjTool{}
+}
+
+type noObjTool struct{}
+
+func (noObjTool) Open(file string, start uint64) (ObjFile, error) {
+ return nil, &os.PathError{Op: "open", Path: file, Err: os.ErrNotExist}
+}
+
+func (noObjTool) Demangle(name []string) (map[string]string, error) {
+ return make(map[string]string), nil
+}
+
+func (noObjTool) Disasm(file string, start, end uint64) ([]Inst, error) {
+ return nil, fmt.Errorf("disassembly not supported")
+}
+
+func (noObjTool) SetConfig(config string) {
+}
+
+// An ObjFile is a single object file: a shared library or executable.
+type ObjFile interface {
+ // Name returns the underlying file name, if available.
+ Name() string
+
+ // Base returns the base address to use when looking up symbols in the file.
+ Base() uint64
+
+ // BuildID returns the GNU build ID of the file, or an empty string.
+ BuildID() string
+
+ // SourceLine reports the source line information for a given
+ // address in the file. Due to inlining, the source line information
+ // is in general a list of positions representing a call stack,
+ // with the leaf function first.
+ SourceLine(addr uint64) ([]Frame, error)
+
+ // Symbols returns a list of symbols in the object file.
+ // If r is not nil, Symbols restricts the list to symbols
+ // with names matching the regular expression.
+ // If addr is not zero, Symbols restricts the list to symbols
+ // containing that address.
+ Symbols(r *regexp.Regexp, addr uint64) ([]*Sym, error)
+
+ // Close closes the file, releasing associated resources.
+ Close() error
+}
+
+// A Frame describes a single line in a source file.
+type Frame struct {
+ Func string // name of function
+ File string // source file name
+ Line int // line in file
+}
+
+// A Sym describes a single symbol in an object file.
+type Sym struct {
+ Name []string // names of symbol (many if symbol was dedup'ed)
+ File string // object file containing symbol
+ Start uint64 // start virtual address
+ End uint64 // virtual address of last byte in sym (Start+size-1)
+}
+
+// An Inst is a single instruction in an assembly listing.
+type Inst struct {
+ Addr uint64 // virtual address of instruction
+ Text string // instruction text
+ File string // source file
+ Line int // source line
+}
+
+// A UI manages user interactions.
+type UI interface {
+ // ReadLine returns a line of text (a command) read from the user.
+ ReadLine() (string, error)
+
+ // Print shows a message to the user.
+ // It formats the text as fmt.Print would and adds a final \n if not already present.
+ // For line-based UI, Print writes to standard error.
+ // (Standard output is reserved for report data.)
+ Print(...interface{})
+
+ // PrintErr shows an error message to the user.
+ // It formats the text as fmt.Print would and adds a final \n if not already present.
+ // For line-based UI, PrintErr writes to standard error.
+ PrintErr(...interface{})
+
+ // IsTerminal returns whether the UI is known to be tied to an
+ // interactive terminal (as opposed to being redirected to a file).
+ IsTerminal() bool
+
+ // SetAutoComplete instructs the UI to call complete(cmd) to obtain
+ // the auto-completion of cmd, if the UI supports auto-completion at all.
+ SetAutoComplete(complete func(string) string)
+}
+
+// StandardUI returns a UI that reads from standard input,
+// prints messages to standard output,
+// prints errors to standard error, and doesn't use auto-completion.
+func StandardUI() UI {
+ return &stdUI{r: bufio.NewReader(os.Stdin)}
+}
+
+type stdUI struct {
+ r *bufio.Reader
+}
+
+func (ui *stdUI) ReadLine() (string, error) {
+ os.Stdout.WriteString("(pprof) ")
+ return ui.r.ReadString('\n')
+}
+
+func (ui *stdUI) Print(args ...interface{}) {
+ ui.fprint(os.Stderr, args)
+}
+
+func (ui *stdUI) PrintErr(args ...interface{}) {
+ ui.fprint(os.Stderr, args)
+}
+
+func (ui *stdUI) IsTerminal() bool {
+ return false
+}
+
+func (ui *stdUI) SetAutoComplete(func(string) string) {
+}
+
+func (ui *stdUI) fprint(f *os.File, args []interface{}) {
+ text := fmt.Sprint(args...)
+ if !strings.HasSuffix(text, "\n") {
+ text += "\n"
+ }
+ f.WriteString(text)
+}
+
+// A Fetcher reads and returns the profile named by src.
+// It gives up after the given timeout, unless src contains a timeout override
+// (as defined by the implementation).
+// It can print messages to ui.
+type Fetcher func(src string, timeout time.Duration, ui UI) (*profile.Profile, error)
+
+// A Symbolizer annotates a profile with symbol information.
+// The profile was fetched from src.
+// The meaning of mode is defined by the implementation.
+type Symbolizer func(mode, src string, prof *profile.Profile, obj ObjTool, ui UI) error
diff --git a/src/cmd/pprof/internal/report/report.go b/src/cmd/pprof/internal/report/report.go
new file mode 100644
index 0000000000..14875c16db
--- /dev/null
+++ b/src/cmd/pprof/internal/report/report.go
@@ -0,0 +1,1726 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package report summarizes a performance profile into a
+// human-readable report.
+package report
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "cmd/pprof/internal/plugin"
+ "internal/pprof/profile"
+)
+
+// Generate generates a report as directed by the Report.
+func Generate(w io.Writer, rpt *Report, obj plugin.ObjTool) error {
+ o := rpt.options
+
+ switch o.OutputFormat {
+ case Dot:
+ return printDOT(w, rpt)
+ case Tree:
+ return printTree(w, rpt)
+ case Text:
+ return printText(w, rpt)
+ case Raw:
+ fmt.Fprint(w, rpt.prof.String())
+ return nil
+ case Tags:
+ return printTags(w, rpt)
+ case Proto:
+ return rpt.prof.Write(w)
+ case Dis:
+ return printAssembly(w, rpt, obj)
+ case List:
+ return printSource(w, rpt)
+ case WebList:
+ return printWebSource(w, rpt, obj)
+ case Callgrind:
+ return printCallgrind(w, rpt)
+ }
+ return fmt.Errorf("unexpected output format")
+}
+
+// printAssembly prints an annotated assembly listing.
+func printAssembly(w io.Writer, rpt *Report, obj plugin.ObjTool) error {
+ g, err := newGraph(rpt)
+ if err != nil {
+ return err
+ }
+
+ o := rpt.options
+ prof := rpt.prof
+
+ // If the regexp source can be parsed as an address, also match
+ // functions that land on that address.
+ var address *uint64
+ if hex, err := strconv.ParseUint(o.Symbol.String(), 0, 64); err == nil {
+ address = &hex
+ }
+
+ fmt.Fprintln(w, "Total:", rpt.formatValue(rpt.total))
+ symbols := symbolsFromBinaries(prof, g, o.Symbol, address, obj)
+ symNodes := nodesPerSymbol(g.ns, symbols)
+ // Sort function names for printing.
+ var syms objSymbols
+ for s := range symNodes {
+ syms = append(syms, s)
+ }
+ sort.Sort(syms)
+
+ // Correlate the symbols from the binary with the profile samples.
+ for _, s := range syms {
+ sns := symNodes[s]
+
+ // Gather samples for this symbol.
+ flatSum, cumSum := sumNodes(sns)
+
+ // Get the function assembly.
+ insns, err := obj.Disasm(s.sym.File, s.sym.Start, s.sym.End)
+ if err != nil {
+ return err
+ }
+
+ ns := annotateAssembly(insns, sns, s.base)
+
+ fmt.Fprintf(w, "ROUTINE ======================== %s\n", s.sym.Name[0])
+ for _, name := range s.sym.Name[1:] {
+ fmt.Fprintf(w, " AKA ======================== %s\n", name)
+ }
+ fmt.Fprintf(w, "%10s %10s (flat, cum) %s of Total\n",
+ rpt.formatValue(flatSum), rpt.formatValue(cumSum),
+ percentage(cumSum, rpt.total))
+
+ for _, n := range ns {
+ fmt.Fprintf(w, "%10s %10s %10x: %s\n", valueOrDot(n.flat, rpt), valueOrDot(n.cum, rpt), n.info.address, n.info.name)
+ }
+ }
+ return nil
+}
+
+// symbolsFromBinaries examines the binaries listed on the profile
+// that have associated samples, and identifies symbols matching rx.
+func symbolsFromBinaries(prof *profile.Profile, g graph, rx *regexp.Regexp, address *uint64, obj plugin.ObjTool) []*objSymbol {
+ hasSamples := make(map[string]bool)
+ // Only examine mappings that have samples that match the
+ // regexp. This is an optimization to speed up pprof.
+ for _, n := range g.ns {
+ if name := n.info.prettyName(); rx.MatchString(name) && n.info.objfile != "" {
+ hasSamples[n.info.objfile] = true
+ }
+ }
+
+ // Walk all mappings looking for matching functions with samples.
+ var objSyms []*objSymbol
+ for _, m := range prof.Mapping {
+ if !hasSamples[filepath.Base(m.File)] {
+ if address == nil || !(m.Start <= *address && *address <= m.Limit) {
+ continue
+ }
+ }
+
+ f, err := obj.Open(m.File, m.Start)
+ if err != nil {
+ fmt.Printf("%v\n", err)
+ continue
+ }
+
+ // Find symbols in this binary matching the user regexp.
+ var addr uint64
+ if address != nil {
+ addr = *address
+ }
+ msyms, err := f.Symbols(rx, addr)
+ base := f.Base()
+ f.Close()
+ if err != nil {
+ continue
+ }
+ for _, ms := range msyms {
+ objSyms = append(objSyms,
+ &objSymbol{
+ sym: ms,
+ base: base,
+ },
+ )
+ }
+ }
+
+ return objSyms
+}
+
+// objSym represents a symbol identified from a binary. It includes
+// the SymbolInfo from the disasm package and the base that must be
+// added to correspond to sample addresses
+type objSymbol struct {
+ sym *plugin.Sym
+ base uint64
+}
+
+// objSymbols is a wrapper type to enable sorting of []*objSymbol.
+type objSymbols []*objSymbol
+
+func (o objSymbols) Len() int {
+ return len(o)
+}
+
+func (o objSymbols) Less(i, j int) bool {
+ if namei, namej := o[i].sym.Name[0], o[j].sym.Name[0]; namei != namej {
+ return namei < namej
+ }
+ return o[i].sym.Start < o[j].sym.Start
+}
+
+func (o objSymbols) Swap(i, j int) {
+ o[i], o[j] = o[j], o[i]
+}
+
+// nodesPerSymbol classifies nodes into a group of symbols.
+func nodesPerSymbol(ns nodes, symbols []*objSymbol) map[*objSymbol]nodes {
+ symNodes := make(map[*objSymbol]nodes)
+ for _, s := range symbols {
+ // Gather samples for this symbol.
+ for _, n := range ns {
+ address := n.info.address - s.base
+ if address >= s.sym.Start && address < s.sym.End {
+ symNodes[s] = append(symNodes[s], n)
+ }
+ }
+ }
+ return symNodes
+}
+
+// annotateAssembly annotates a set of assembly instructions with a
+// set of samples. It returns a set of nodes to display. base is an
+// offset to adjust the sample addresses.
+func annotateAssembly(insns []plugin.Inst, samples nodes, base uint64) nodes {
+ // Add end marker to simplify printing loop.
+ insns = append(insns, plugin.Inst{
+ Addr: ^uint64(0),
+ })
+
+ // Ensure samples are sorted by address.
+ samples.sort(addressOrder)
+
+ var s int
+ var asm nodes
+ for ix, in := range insns[:len(insns)-1] {
+ n := node{
+ info: nodeInfo{
+ address: in.Addr,
+ name: in.Text,
+ file: trimPath(in.File),
+ lineno: in.Line,
+ },
+ }
+
+ // Sum all the samples until the next instruction (to account
+ // for samples attributed to the middle of an instruction).
+ for next := insns[ix+1].Addr; s < len(samples) && samples[s].info.address-base < next; s++ {
+ n.flat += samples[s].flat
+ n.cum += samples[s].cum
+ if samples[s].info.file != "" {
+ n.info.file = trimPath(samples[s].info.file)
+ n.info.lineno = samples[s].info.lineno
+ }
+ }
+ asm = append(asm, &n)
+ }
+
+ return asm
+}
+
+// valueOrDot formats a value according to a report, intercepting zero
+// values.
+func valueOrDot(value int64, rpt *Report) string {
+ if value == 0 {
+ return "."
+ }
+ return rpt.formatValue(value)
+}
+
+// printTags collects all tags referenced in the profile and prints
+// them in a sorted table.
+func printTags(w io.Writer, rpt *Report) error {
+ p := rpt.prof
+
+ // Hashtable to keep accumulate tags as key,value,count.
+ tagMap := make(map[string]map[string]int64)
+ for _, s := range p.Sample {
+ for key, vals := range s.Label {
+ for _, val := range vals {
+ if valueMap, ok := tagMap[key]; ok {
+ valueMap[val] = valueMap[val] + s.Value[0]
+ continue
+ }
+ valueMap := make(map[string]int64)
+ valueMap[val] = s.Value[0]
+ tagMap[key] = valueMap
+ }
+ }
+ for key, vals := range s.NumLabel {
+ for _, nval := range vals {
+ val := scaledValueLabel(nval, key, "auto")
+ if valueMap, ok := tagMap[key]; ok {
+ valueMap[val] = valueMap[val] + s.Value[0]
+ continue
+ }
+ valueMap := make(map[string]int64)
+ valueMap[val] = s.Value[0]
+ tagMap[key] = valueMap
+ }
+ }
+ }
+
+ tagKeys := make(tags, 0, len(tagMap))
+ for key := range tagMap {
+ tagKeys = append(tagKeys, &tag{name: key})
+ }
+ sort.Sort(tagKeys)
+
+ for _, tagKey := range tagKeys {
+ var total int64
+ key := tagKey.name
+ tags := make(tags, 0, len(tagMap[key]))
+ for t, c := range tagMap[key] {
+ total += c
+ tags = append(tags, &tag{name: t, weight: c})
+ }
+
+ sort.Sort(tags)
+ fmt.Fprintf(w, "%s: Total %d\n", key, total)
+ for _, t := range tags {
+ if total > 0 {
+ fmt.Fprintf(w, " %8d (%s): %s\n", t.weight,
+ percentage(t.weight, total), t.name)
+ } else {
+ fmt.Fprintf(w, " %8d: %s\n", t.weight, t.name)
+ }
+ }
+ fmt.Fprintln(w)
+ }
+ return nil
+}
+
+// printText prints a flat text report for a profile.
+func printText(w io.Writer, rpt *Report) error {
+ g, err := newGraph(rpt)
+ if err != nil {
+ return err
+ }
+
+ origCount, droppedNodes, _ := g.preprocess(rpt)
+ fmt.Fprintln(w, strings.Join(legendDetailLabels(rpt, g, origCount, droppedNodes, 0), "\n"))
+
+ fmt.Fprintf(w, "%10s %5s%% %5s%% %10s %5s%%\n",
+ "flat", "flat", "sum", "cum", "cum")
+
+ var flatSum int64
+ for _, n := range g.ns {
+ name, flat, cum := n.info.prettyName(), n.flat, n.cum
+
+ flatSum += flat
+ fmt.Fprintf(w, "%10s %s %s %10s %s %s\n",
+ rpt.formatValue(flat),
+ percentage(flat, rpt.total),
+ percentage(flatSum, rpt.total),
+ rpt.formatValue(cum),
+ percentage(cum, rpt.total),
+ name)
+ }
+ return nil
+}
+
+// printCallgrind prints a graph for a profile on callgrind format.
+func printCallgrind(w io.Writer, rpt *Report) error {
+ g, err := newGraph(rpt)
+ if err != nil {
+ return err
+ }
+
+ o := rpt.options
+ rpt.options.NodeFraction = 0
+ rpt.options.EdgeFraction = 0
+ rpt.options.NodeCount = 0
+
+ g.preprocess(rpt)
+
+ fmt.Fprintln(w, "positions: instr line")
+ fmt.Fprintln(w, "events:", o.SampleType+"("+o.OutputUnit+")")
+
+ objfiles := make(map[string]int)
+ files := make(map[string]int)
+ names := make(map[string]int)
+
+ // prevInfo points to the previous nodeInfo.
+ // It is used to group cost lines together as much as possible.
+ var prevInfo *nodeInfo
+ for _, n := range g.ns {
+ if prevInfo == nil || n.info.objfile != prevInfo.objfile || n.info.file != prevInfo.file || n.info.name != prevInfo.name {
+ fmt.Fprintln(w)
+ fmt.Fprintln(w, "ob="+callgrindName(objfiles, n.info.objfile))
+ fmt.Fprintln(w, "fl="+callgrindName(files, n.info.file))
+ fmt.Fprintln(w, "fn="+callgrindName(names, n.info.name))
+ }
+
+ addr := callgrindAddress(prevInfo, n.info.address)
+ sv, _ := ScaleValue(n.flat, o.SampleUnit, o.OutputUnit)
+ fmt.Fprintf(w, "%s %d %d\n", addr, n.info.lineno, int(sv))
+
+ // Print outgoing edges.
+ for _, out := range sortedEdges(n.out) {
+ c, _ := ScaleValue(out.weight, o.SampleUnit, o.OutputUnit)
+ callee := out.dest
+ fmt.Fprintln(w, "cfl="+callgrindName(files, callee.info.file))
+ fmt.Fprintln(w, "cfn="+callgrindName(names, callee.info.name))
+ fmt.Fprintf(w, "calls=%d %s %d\n", int(c), callgrindAddress(prevInfo, callee.info.address), callee.info.lineno)
+ // TODO: This address may be in the middle of a call
+ // instruction. It would be best to find the beginning
+ // of the instruction, but the tools seem to handle
+ // this OK.
+ fmt.Fprintf(w, "* * %d\n", int(c))
+ }
+
+ prevInfo = &n.info
+ }
+
+ return nil
+}
+
+// callgrindName implements the callgrind naming compression scheme.
+// For names not previously seen returns "(N) name", where N is a
+// unique index. For names previously seen returns "(N)" where N is
+// the index returned the first time.
+func callgrindName(names map[string]int, name string) string {
+ if name == "" {
+ return ""
+ }
+ if id, ok := names[name]; ok {
+ return fmt.Sprintf("(%d)", id)
+ }
+ id := len(names) + 1
+ names[name] = id
+ return fmt.Sprintf("(%d) %s", id, name)
+}
+
+// callgrindAddress implements the callgrind subposition compression scheme if
+// possible. If prevInfo != nil, it contains the previous address. The current
+// address can be given relative to the previous address, with an explicit +/-
+// to indicate it is relative, or * for the same address.
+func callgrindAddress(prevInfo *nodeInfo, curr uint64) string {
+ abs := fmt.Sprintf("%#x", curr)
+ if prevInfo == nil {
+ return abs
+ }
+
+ prev := prevInfo.address
+ if prev == curr {
+ return "*"
+ }
+
+ diff := int64(curr - prev)
+ relative := fmt.Sprintf("%+d", diff)
+
+ // Only bother to use the relative address if it is actually shorter.
+ if len(relative) < len(abs) {
+ return relative
+ }
+
+ return abs
+}
+
+// printTree prints a tree-based report in text form.
+func printTree(w io.Writer, rpt *Report) error {
+ const separator = "----------------------------------------------------------+-------------"
+ const legend = " flat flat% sum% cum cum% calls calls% + context "
+
+ g, err := newGraph(rpt)
+ if err != nil {
+ return err
+ }
+
+ origCount, droppedNodes, _ := g.preprocess(rpt)
+ fmt.Fprintln(w, strings.Join(legendDetailLabels(rpt, g, origCount, droppedNodes, 0), "\n"))
+
+ fmt.Fprintln(w, separator)
+ fmt.Fprintln(w, legend)
+ var flatSum int64
+
+ rx := rpt.options.Symbol
+ for _, n := range g.ns {
+ name, flat, cum := n.info.prettyName(), n.flat, n.cum
+
+ // Skip any entries that do not match the regexp (for the "peek" command).
+ if rx != nil && !rx.MatchString(name) {
+ continue
+ }
+
+ fmt.Fprintln(w, separator)
+ // Print incoming edges.
+ inEdges := sortedEdges(n.in)
+ inSum := inEdges.sum()
+ for _, in := range inEdges {
+ fmt.Fprintf(w, "%50s %s | %s\n", rpt.formatValue(in.weight),
+ percentage(in.weight, inSum), in.src.info.prettyName())
+ }
+
+ // Print current node.
+ flatSum += flat
+ fmt.Fprintf(w, "%10s %s %s %10s %s | %s\n",
+ rpt.formatValue(flat),
+ percentage(flat, rpt.total),
+ percentage(flatSum, rpt.total),
+ rpt.formatValue(cum),
+ percentage(cum, rpt.total),
+ name)
+
+ // Print outgoing edges.
+ outEdges := sortedEdges(n.out)
+ outSum := outEdges.sum()
+ for _, out := range outEdges {
+ fmt.Fprintf(w, "%50s %s | %s\n", rpt.formatValue(out.weight),
+ percentage(out.weight, outSum), out.dest.info.prettyName())
+ }
+ }
+ if len(g.ns) > 0 {
+ fmt.Fprintln(w, separator)
+ }
+ return nil
+}
+
+// printDOT prints an annotated callgraph in DOT format.
+func printDOT(w io.Writer, rpt *Report) error {
+ g, err := newGraph(rpt)
+ if err != nil {
+ return err
+ }
+
+ origCount, droppedNodes, droppedEdges := g.preprocess(rpt)
+
+ prof := rpt.prof
+ graphname := "unnamed"
+ if len(prof.Mapping) > 0 {
+ graphname = filepath.Base(prof.Mapping[0].File)
+ }
+ fmt.Fprintln(w, `digraph "`+graphname+`" {`)
+ fmt.Fprintln(w, `node [style=filled fillcolor="#f8f8f8"]`)
+ fmt.Fprintln(w, dotLegend(rpt, g, origCount, droppedNodes, droppedEdges))
+
+ if len(g.ns) == 0 {
+ fmt.Fprintln(w, "}")
+ return nil
+ }
+
+ // Make sure nodes have a unique consistent id.
+ nodeIndex := make(map[*node]int)
+ maxFlat := float64(g.ns[0].flat)
+ for i, n := range g.ns {
+ nodeIndex[n] = i + 1
+ if float64(n.flat) > maxFlat {
+ maxFlat = float64(n.flat)
+ }
+ }
+ var edges edgeList
+ for _, n := range g.ns {
+ node := dotNode(rpt, maxFlat, nodeIndex[n], n)
+ fmt.Fprintln(w, node)
+ if nodelets := dotNodelets(rpt, nodeIndex[n], n); nodelets != "" {
+ fmt.Fprint(w, nodelets)
+ }
+
+ // Collect outgoing edges.
+ for _, e := range n.out {
+ edges = append(edges, e)
+ }
+ }
+ // Sort edges by frequency as a hint to the graph layout engine.
+ sort.Sort(edges)
+ for _, e := range edges {
+ fmt.Fprintln(w, dotEdge(rpt, nodeIndex[e.src], nodeIndex[e.dest], e))
+ }
+ fmt.Fprintln(w, "}")
+ return nil
+}
+
+// percentage computes the percentage of total of a value, and encodes
+// it as a string. At least two digits of precision are printed.
+func percentage(value, total int64) string {
+ var ratio float64
+ if total != 0 {
+ ratio = float64(value) / float64(total) * 100
+ }
+ switch {
+ case ratio >= 99.95:
+ return " 100%"
+ case ratio >= 1.0:
+ return fmt.Sprintf("%5.2f%%", ratio)
+ default:
+ return fmt.Sprintf("%5.2g%%", ratio)
+ }
+}
+
+// dotLegend generates the overall graph label for a report in DOT format.
+func dotLegend(rpt *Report, g graph, origCount, droppedNodes, droppedEdges int) string {
+ label := legendLabels(rpt)
+ label = append(label, legendDetailLabels(rpt, g, origCount, droppedNodes, droppedEdges)...)
+ return fmt.Sprintf(`subgraph cluster_L { L [shape=box fontsize=32 label="%s\l"] }`, strings.Join(label, `\l`))
+}
+
+// legendLabels generates labels exclusive to graph visualization.
+func legendLabels(rpt *Report) []string {
+ prof := rpt.prof
+ o := rpt.options
+ var label []string
+ if len(prof.Mapping) > 0 {
+ if prof.Mapping[0].File != "" {
+ label = append(label, "File: "+filepath.Base(prof.Mapping[0].File))
+ }
+ if prof.Mapping[0].BuildID != "" {
+ label = append(label, "Build ID: "+prof.Mapping[0].BuildID)
+ }
+ }
+ if o.SampleType != "" {
+ label = append(label, "Type: "+o.SampleType)
+ }
+ if prof.TimeNanos != 0 {
+ const layout = "Jan 2, 2006 at 3:04pm (MST)"
+ label = append(label, "Time: "+time.Unix(0, prof.TimeNanos).Format(layout))
+ }
+ if prof.DurationNanos != 0 {
+ label = append(label, fmt.Sprintf("Duration: %v", time.Duration(prof.DurationNanos)))
+ }
+ return label
+}
+
+// legendDetailLabels generates labels common to graph and text visualization.
+func legendDetailLabels(rpt *Report, g graph, origCount, droppedNodes, droppedEdges int) []string {
+ nodeFraction := rpt.options.NodeFraction
+ edgeFraction := rpt.options.EdgeFraction
+ nodeCount := rpt.options.NodeCount
+
+ label := []string{}
+
+ var flatSum int64
+ for _, n := range g.ns {
+ flatSum = flatSum + n.flat
+ }
+
+ label = append(label, fmt.Sprintf("%s of %s total (%s)", rpt.formatValue(flatSum), rpt.formatValue(rpt.total), percentage(flatSum, rpt.total)))
+
+ if rpt.total > 0 {
+ if droppedNodes > 0 {
+ label = append(label, genLabel(droppedNodes, "node", "cum",
+ rpt.formatValue(int64(float64(rpt.total)*nodeFraction))))
+ }
+ if droppedEdges > 0 {
+ label = append(label, genLabel(droppedEdges, "edge", "freq",
+ rpt.formatValue(int64(float64(rpt.total)*edgeFraction))))
+ }
+ if nodeCount > 0 && nodeCount < origCount {
+ label = append(label, fmt.Sprintf("Showing top %d nodes out of %d (cum >= %s)",
+ nodeCount, origCount,
+ rpt.formatValue(g.ns[len(g.ns)-1].cum)))
+ }
+ }
+ return label
+}
+
+func genLabel(d int, n, l, f string) string {
+ if d > 1 {
+ n = n + "s"
+ }
+ return fmt.Sprintf("Dropped %d %s (%s <= %s)", d, n, l, f)
+}
+
+// dotNode generates a graph node in DOT format.
+func dotNode(rpt *Report, maxFlat float64, rIndex int, n *node) string {
+ flat, cum := n.flat, n.cum
+
+ labels := strings.Split(n.info.prettyName(), "::")
+ label := strings.Join(labels, `\n`) + `\n`
+
+ flatValue := rpt.formatValue(flat)
+ if flat > 0 {
+ label = label + fmt.Sprintf(`%s(%s)`,
+ flatValue,
+ strings.TrimSpace(percentage(flat, rpt.total)))
+ } else {
+ label = label + "0"
+ }
+ cumValue := flatValue
+ if cum != flat {
+ if flat > 0 {
+ label = label + `\n`
+ } else {
+ label = label + " "
+ }
+ cumValue = rpt.formatValue(cum)
+ label = label + fmt.Sprintf(`of %s(%s)`,
+ cumValue,
+ strings.TrimSpace(percentage(cum, rpt.total)))
+ }
+
+ // Scale font sizes from 8 to 24 based on percentage of flat frequency.
+ // Use non linear growth to emphasize the size difference.
+ baseFontSize, maxFontGrowth := 8, 16.0
+ fontSize := baseFontSize
+ if maxFlat > 0 && flat > 0 && float64(flat) <= maxFlat {
+ fontSize += int(math.Ceil(maxFontGrowth * math.Sqrt(float64(flat)/maxFlat)))
+ }
+ return fmt.Sprintf(`N%d [label="%s" fontsize=%d shape=box tooltip="%s (%s)"]`,
+ rIndex,
+ label,
+ fontSize, n.info.prettyName(), cumValue)
+}
+
+// dotEdge generates a graph edge in DOT format.
+func dotEdge(rpt *Report, from, to int, e *edgeInfo) string {
+ w := rpt.formatValue(e.weight)
+ attr := fmt.Sprintf(`label=" %s"`, w)
+ if rpt.total > 0 {
+ if weight := 1 + int(e.weight*100/rpt.total); weight > 1 {
+ attr = fmt.Sprintf(`%s weight=%d`, attr, weight)
+ }
+ if width := 1 + int(e.weight*5/rpt.total); width > 1 {
+ attr = fmt.Sprintf(`%s penwidth=%d`, attr, width)
+ }
+ }
+ arrow := "->"
+ if e.residual {
+ arrow = "..."
+ }
+ tooltip := fmt.Sprintf(`"%s %s %s (%s)"`,
+ e.src.info.prettyName(), arrow, e.dest.info.prettyName(), w)
+ attr = fmt.Sprintf(`%s tooltip=%s labeltooltip=%s`,
+ attr, tooltip, tooltip)
+
+ if e.residual {
+ attr = attr + ` style="dotted"`
+ }
+
+ if len(e.src.tags) > 0 {
+ // Separate children further if source has tags.
+ attr = attr + " minlen=2"
+ }
+ return fmt.Sprintf("N%d -> N%d [%s]", from, to, attr)
+}
+
+// dotNodelets generates the DOT boxes for the node tags.
+func dotNodelets(rpt *Report, rIndex int, n *node) (dot string) {
+ const maxNodelets = 4 // Number of nodelets for alphanumeric labels
+ const maxNumNodelets = 4 // Number of nodelets for numeric labels
+
+ var ts, nts tags
+ for _, t := range n.tags {
+ if t.unit == "" {
+ ts = append(ts, t)
+ } else {
+ nts = append(nts, t)
+ }
+ }
+
+ // Select the top maxNodelets alphanumeric labels by weight
+ sort.Sort(ts)
+ if len(ts) > maxNodelets {
+ ts = ts[:maxNodelets]
+ }
+ for i, t := range ts {
+ weight := rpt.formatValue(t.weight)
+ dot += fmt.Sprintf(`N%d_%d [label = "%s" fontsize=8 shape=box3d tooltip="%s"]`+"\n", rIndex, i, t.name, weight)
+ dot += fmt.Sprintf(`N%d -> N%d_%d [label=" %s" weight=100 tooltip="\L" labeltooltip="\L"]`+"\n", rIndex, rIndex, i, weight)
+ }
+
+ // Collapse numeric labels into maxNumNodelets buckets, of the form:
+ // 1MB..2MB, 3MB..5MB, ...
+ nts = collapseTags(nts, maxNumNodelets)
+ sort.Sort(nts)
+ for i, t := range nts {
+ weight := rpt.formatValue(t.weight)
+ dot += fmt.Sprintf(`NN%d_%d [label = "%s" fontsize=8 shape=box3d tooltip="%s"]`+"\n", rIndex, i, t.name, weight)
+ dot += fmt.Sprintf(`N%d -> NN%d_%d [label=" %s" weight=100 tooltip="\L" labeltooltip="\L"]`+"\n", rIndex, rIndex, i, weight)
+ }
+
+ return dot
+}
+
+// graph summarizes a performance profile into a format that is
+// suitable for visualization.
+type graph struct {
+ ns nodes
+}
+
+// nodes is an ordered collection of graph nodes.
+type nodes []*node
+
+// tags represent sample annotations
+type tags []*tag
+type tagMap map[string]*tag
+
+type tag struct {
+ name string
+ unit string // Describe the value, "" for non-numeric tags
+ value int64
+ weight int64
+}
+
+func (t tags) Len() int { return len(t) }
+func (t tags) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+func (t tags) Less(i, j int) bool {
+ if t[i].weight == t[j].weight {
+ return t[i].name < t[j].name
+ }
+ return t[i].weight > t[j].weight
+}
+
+// node is an entry on a profiling report. It represents a unique
+// program location. It can include multiple names to represent
+// inlined functions.
+type node struct {
+ info nodeInfo // Information associated to this entry.
+
+ // values associated to this node.
+ // flat is exclusive to this node, cum includes all descendents.
+ flat, cum int64
+
+ // in and out contains the nodes immediately reaching or reached by this nodes.
+ in, out edgeMap
+
+ // tags provide additional information about subsets of a sample.
+ tags tagMap
+}
+
+type nodeInfo struct {
+ name string
+ origName string
+ address uint64
+ file string
+ startLine, lineno int
+ inline bool
+ lowPriority bool
+ objfile string
+ parent *node // Used only if creating a calltree
+}
+
+func (n *node) addTags(s *profile.Sample, weight int64) {
+ // Add a tag with all string labels
+ var labels []string
+ for key, vals := range s.Label {
+ for _, v := range vals {
+ labels = append(labels, key+":"+v)
+ }
+ }
+ if len(labels) > 0 {
+ sort.Strings(labels)
+ l := n.tags.findOrAddTag(strings.Join(labels, `\n`), "", 0)
+ l.weight += weight
+ }
+
+ for key, nvals := range s.NumLabel {
+ for _, v := range nvals {
+ label := scaledValueLabel(v, key, "auto")
+ l := n.tags.findOrAddTag(label, key, v)
+ l.weight += weight
+ }
+ }
+}
+
+func (m tagMap) findOrAddTag(label, unit string, value int64) *tag {
+ if l := m[label]; l != nil {
+ return l
+ }
+ l := &tag{
+ name: label,
+ unit: unit,
+ value: value,
+ }
+ m[label] = l
+ return l
+}
+
+// collapseTags reduces the number of entries in a tagMap by merging
+// adjacent nodes into ranges. It uses a greedy approach to merge
+// starting with the entries with the lowest weight.
+func collapseTags(ts tags, count int) tags {
+ if len(ts) <= count {
+ return ts
+ }
+
+ sort.Sort(ts)
+ tagGroups := make([]tags, count)
+ for i, t := range ts[:count] {
+ tagGroups[i] = tags{t}
+ }
+ for _, t := range ts[count:] {
+ g, d := 0, tagDistance(t, tagGroups[0][0])
+ for i := 1; i < count; i++ {
+ if nd := tagDistance(t, tagGroups[i][0]); nd < d {
+ g, d = i, nd
+ }
+ }
+ tagGroups[g] = append(tagGroups[g], t)
+ }
+
+ var nts tags
+ for _, g := range tagGroups {
+ l, w := tagGroupLabel(g)
+ nts = append(nts, &tag{
+ name: l,
+ weight: w,
+ })
+ }
+ return nts
+}
+
+func tagDistance(t, u *tag) float64 {
+ v, _ := ScaleValue(u.value, u.unit, t.unit)
+ if v < float64(t.value) {
+ return float64(t.value) - v
+ }
+ return v - float64(t.value)
+}
+
+func tagGroupLabel(g tags) (string, int64) {
+ if len(g) == 1 {
+ t := g[0]
+ return scaledValueLabel(t.value, t.unit, "auto"), t.weight
+ }
+ min := g[0]
+ max := g[0]
+ w := min.weight
+ for _, t := range g[1:] {
+ if v, _ := ScaleValue(t.value, t.unit, min.unit); int64(v) < min.value {
+ min = t
+ }
+ if v, _ := ScaleValue(t.value, t.unit, max.unit); int64(v) > max.value {
+ max = t
+ }
+ w += t.weight
+ }
+ return scaledValueLabel(min.value, min.unit, "auto") + ".." +
+ scaledValueLabel(max.value, max.unit, "auto"), w
+}
+
+// sumNodes adds the flat and sum values on a report.
+func sumNodes(ns nodes) (flat int64, cum int64) {
+ for _, n := range ns {
+ flat += n.flat
+ cum += n.cum
+ }
+ return
+}
+
+type edgeMap map[*node]*edgeInfo
+
+// edgeInfo contains any attributes to be represented about edges in a graph/
+type edgeInfo struct {
+ src, dest *node
+ // The summary weight of the edge
+ weight int64
+ // residual edges connect nodes that were connected through a
+ // separate node, which has been removed from the report.
+ residual bool
+}
+
+// bumpWeight increases the weight of an edge. If there isn't such an
+// edge in the map one is created.
+func bumpWeight(from, to *node, w int64, residual bool) {
+ if from.out[to] != to.in[from] {
+ panic(fmt.Errorf("asymmetric edges %v %v", *from, *to))
+ }
+
+ if n := from.out[to]; n != nil {
+ n.weight += w
+ if n.residual && !residual {
+ n.residual = false
+ }
+ return
+ }
+
+ info := &edgeInfo{src: from, dest: to, weight: w, residual: residual}
+ from.out[to] = info
+ to.in[from] = info
+}
+
+// Output formats.
+const (
+ Proto = iota
+ Dot
+ Tags
+ Tree
+ Text
+ Raw
+ Dis
+ List
+ WebList
+ Callgrind
+)
+
+// Options are the formatting and filtering options used to generate a
+// profile.
+type Options struct {
+ OutputFormat int
+
+ CumSort bool
+ CallTree bool
+ PrintAddresses bool
+ DropNegative bool
+ Ratio float64
+
+ NodeCount int
+ NodeFraction float64
+ EdgeFraction float64
+
+ SampleType string
+ SampleUnit string // Unit for the sample data from the profile.
+ OutputUnit string // Units for data formatting in report.
+
+ Symbol *regexp.Regexp // Symbols to include on disassembly report.
+}
+
+// newGraph summarizes performance data from a profile into a graph.
+func newGraph(rpt *Report) (g graph, err error) {
+ prof := rpt.prof
+ o := rpt.options
+
+ // Generate a tree for graphical output if requested.
+ buildTree := o.CallTree && o.OutputFormat == Dot
+
+ locations := make(map[uint64][]nodeInfo)
+ for _, l := range prof.Location {
+ locations[l.ID] = newLocInfo(l)
+ }
+
+ nm := make(nodeMap)
+ for _, sample := range prof.Sample {
+ if sample.Location == nil {
+ continue
+ }
+
+ // Construct list of node names for sample.
+ var stack []nodeInfo
+ for _, loc := range sample.Location {
+ id := loc.ID
+ stack = append(stack, locations[id]...)
+ }
+
+ // Upfront pass to update the parent chains, to prevent the
+ // merging of nodes with different parents.
+ if buildTree {
+ var nn *node
+ for i := len(stack); i > 0; i-- {
+ n := &stack[i-1]
+ n.parent = nn
+ nn = nm.findOrInsertNode(*n)
+ }
+ }
+
+ leaf := nm.findOrInsertNode(stack[0])
+ weight := rpt.sampleValue(sample)
+ leaf.addTags(sample, weight)
+
+ // Aggregate counter data.
+ leaf.flat += weight
+ seen := make(map[*node]bool)
+ var nn *node
+ for _, s := range stack {
+ n := nm.findOrInsertNode(s)
+ if !seen[n] {
+ seen[n] = true
+ n.cum += weight
+
+ if nn != nil {
+ bumpWeight(n, nn, weight, false)
+ }
+ }
+ nn = n
+ }
+ }
+
+ // Collect new nodes into a report.
+ ns := make(nodes, 0, len(nm))
+ for _, n := range nm {
+ if rpt.options.DropNegative && n.flat < 0 {
+ continue
+ }
+ ns = append(ns, n)
+ }
+
+ return graph{ns}, nil
+}
+
+// Create a slice of formatted names for a location.
+func newLocInfo(l *profile.Location) []nodeInfo {
+ var objfile string
+
+ if m := l.Mapping; m != nil {
+ objfile = m.File
+ }
+
+ if len(l.Line) == 0 {
+ return []nodeInfo{
+ {
+ address: l.Address,
+ objfile: objfile,
+ },
+ }
+ }
+ var info []nodeInfo
+ numInlineFrames := len(l.Line) - 1
+ for li, line := range l.Line {
+ ni := nodeInfo{
+ address: l.Address,
+ lineno: int(line.Line),
+ inline: li < numInlineFrames,
+ objfile: objfile,
+ }
+
+ if line.Function != nil {
+ ni.name = line.Function.Name
+ ni.origName = line.Function.SystemName
+ ni.file = line.Function.Filename
+ ni.startLine = int(line.Function.StartLine)
+ }
+
+ info = append(info, ni)
+ }
+ return info
+}
+
+// nodeMap maps from a node info struct to a node. It is used to merge
+// report entries with the same info.
+type nodeMap map[nodeInfo]*node
+
+func (m nodeMap) findOrInsertNode(info nodeInfo) *node {
+ rr := m[info]
+ if rr == nil {
+ rr = &node{
+ info: info,
+ in: make(edgeMap),
+ out: make(edgeMap),
+ tags: make(map[string]*tag),
+ }
+ m[info] = rr
+ }
+ return rr
+}
+
+// preprocess does any required filtering/sorting according to the
+// report options. Returns the mapping from each node to any nodes
+// removed by path compression and statistics on the nodes/edges removed.
+func (g *graph) preprocess(rpt *Report) (origCount, droppedNodes, droppedEdges int) {
+ o := rpt.options
+
+ // Compute total weight of current set of nodes.
+ // This is <= rpt.total because of node filtering.
+ var totalValue int64
+ for _, n := range g.ns {
+ totalValue += n.flat
+ }
+
+ // Remove nodes with value <= total*nodeFraction
+ if nodeFraction := o.NodeFraction; nodeFraction > 0 {
+ var removed nodes
+ minValue := int64(float64(totalValue) * nodeFraction)
+ kept := make(nodes, 0, len(g.ns))
+ for _, n := range g.ns {
+ if n.cum < minValue {
+ removed = append(removed, n)
+ } else {
+ kept = append(kept, n)
+ tagsKept := make(map[string]*tag)
+ for s, t := range n.tags {
+ if t.weight >= minValue {
+ tagsKept[s] = t
+ }
+ }
+ n.tags = tagsKept
+ }
+ }
+ droppedNodes = len(removed)
+ removeNodes(removed, false, false)
+ g.ns = kept
+ }
+
+ // Remove edges below minimum frequency.
+ if edgeFraction := o.EdgeFraction; edgeFraction > 0 {
+ minEdge := int64(float64(totalValue) * edgeFraction)
+ for _, n := range g.ns {
+ for src, e := range n.in {
+ if e.weight < minEdge {
+ delete(n.in, src)
+ delete(src.out, n)
+ droppedEdges++
+ }
+ }
+ }
+ }
+
+ sortOrder := flatName
+ if o.CumSort {
+ // Force cum sorting for graph output, to preserve connectivity.
+ sortOrder = cumName
+ }
+
+ // Nodes that have flat==0 and a single in/out do not provide much
+ // information. Give them first chance to be removed. Do not consider edges
+ // from/to nodes that are expected to be removed.
+ maxNodes := o.NodeCount
+ if o.OutputFormat == Dot {
+ if maxNodes > 0 && maxNodes < len(g.ns) {
+ sortOrder = cumName
+ g.ns.sort(cumName)
+ cumCutoff := g.ns[maxNodes].cum
+ for _, n := range g.ns {
+ if n.flat == 0 {
+ if count := countEdges(n.out, cumCutoff); count > 1 {
+ continue
+ }
+ if count := countEdges(n.in, cumCutoff); count != 1 {
+ continue
+ }
+ n.info.lowPriority = true
+ }
+ }
+ }
+ }
+
+ g.ns.sort(sortOrder)
+ if maxNodes > 0 {
+ origCount = len(g.ns)
+ for index, nodes := 0, 0; index < len(g.ns); index++ {
+ nodes++
+ // For DOT output, count the tags as nodes since we will draw
+ // boxes for them.
+ if o.OutputFormat == Dot {
+ nodes += len(g.ns[index].tags)
+ }
+ if nodes > maxNodes {
+ // Trim to the top n nodes. Create dotted edges to bridge any
+ // broken connections.
+ removeNodes(g.ns[index:], true, true)
+ g.ns = g.ns[:index]
+ break
+ }
+ }
+ }
+ removeRedundantEdges(g.ns)
+
+ // Select best unit for profile output.
+ // Find the appropriate units for the smallest non-zero sample
+ if o.OutputUnit == "minimum" && len(g.ns) > 0 {
+ var maxValue, minValue int64
+
+ for _, n := range g.ns {
+ if n.flat > 0 && (minValue == 0 || n.flat < minValue) {
+ minValue = n.flat
+ }
+ if n.cum > maxValue {
+ maxValue = n.cum
+ }
+ }
+ if r := o.Ratio; r > 0 && r != 1 {
+ minValue = int64(float64(minValue) * r)
+ maxValue = int64(float64(maxValue) * r)
+ }
+
+ _, minUnit := ScaleValue(minValue, o.SampleUnit, "minimum")
+ _, maxUnit := ScaleValue(maxValue, o.SampleUnit, "minimum")
+
+ unit := minUnit
+ if minUnit != maxUnit && minValue*100 < maxValue && o.OutputFormat != Callgrind {
+ // Minimum and maximum values have different units. Scale
+ // minimum by 100 to use larger units, allowing minimum value to
+ // be scaled down to 0.01, except for callgrind reports since
+ // they can only represent integer values.
+ _, unit = ScaleValue(100*minValue, o.SampleUnit, "minimum")
+ }
+
+ if unit != "" {
+ o.OutputUnit = unit
+ } else {
+ o.OutputUnit = o.SampleUnit
+ }
+ }
+ return
+}
+
// countEdges returns the number of edges in el whose weight is
// strictly above the specified cutoff. (Edges at or below the cutoff
// are not counted.)
func countEdges(el edgeMap, cutoff int64) int {
	count := 0
	for _, e := range el {
		if e.weight > cutoff {
			count++
		}
	}
	return count
}
+
// removeNodes removes nodes from a report, optionally bridging
// connections between in/out edges and spreading out their weights
// proportionally. residual marks new bridge edges as residual
// (dotted).
func removeNodes(toRemove nodes, bridge, residual bool) {
	for _, n := range toRemove {
		// Detach n from the out-edge maps of its predecessors.
		for ei := range n.in {
			delete(ei.out, n)
		}
		if bridge {
			// Connect every predecessor to every successor. Each bridge
			// edge carries the successor's weight scaled by the fraction
			// of n's cumulative value contributed by the incoming edge.
			for ei, wi := range n.in {
				for eo, wo := range n.out {
					var weight int64
					if n.cum != 0 {
						weight = int64(float64(wo.weight) * (float64(wi.weight) / float64(n.cum)))
					}
					bumpWeight(ei, eo, weight, residual)
				}
			}
		}
		// Detach n from the in-edge maps of its successors.
		for eo := range n.out {
			delete(eo.in, n)
		}
	}
}
+
// removeRedundantEdges removes residual edges if the destination can
// be reached through another path. This is done to simplify the graph
// while preserving connectivity.
func removeRedundantEdges(ns nodes) {
	// Walk the nodes and outgoing edges in reverse order to prefer
	// removing edges with the lowest weight.
	for i := len(ns); i > 0; i-- {
		n := ns[i-1]
		in := sortedEdges(n.in)
		for j := len(in); j > 0; j-- {
			// Only residual (bridge) edges are candidates for removal.
			if e := in[j-1]; e.residual && isRedundant(e) {
				delete(e.src.out, e.dest)
				delete(e.dest.in, e.src)
			}
		}
	}
}
+
+// isRedundant determines if an edge can be removed without impacting
+// connectivity of the whole graph. This is implemented by checking if the
+// nodes have a common ancestor after removing the edge.
+func isRedundant(e *edgeInfo) bool {
+ destPred := predecessors(e, e.dest)
+ if len(destPred) == 1 {
+ return false
+ }
+ srcPred := predecessors(e, e.src)
+
+ for n := range srcPred {
+ if destPred[n] && n != e.dest {
+ return true
+ }
+ }
+ return false
+}
+
+// predecessors collects all the predecessors to node n, excluding edge e.
+func predecessors(e *edgeInfo, n *node) map[*node]bool {
+ seen := map[*node]bool{n: true}
+ queue := []*node{n}
+ for len(queue) > 0 {
+ n := queue[0]
+ queue = queue[1:]
+ for _, ie := range n.in {
+ if e == ie || seen[ie.src] {
+ continue
+ }
+ seen[ie.src] = true
+ queue = append(queue, ie.src)
+ }
+ }
+ return seen
+}
+
// nodeSorter is a mechanism used to allow a report to be sorted
// in different ways. It implements sort.Interface by delegating the
// comparison to the stored closure.
type nodeSorter struct {
	rs   nodes               // nodes being sorted
	less func(i, j int) bool // comparison between rs[i] and rs[j]
}

func (s nodeSorter) Len() int           { return len(s.rs) }
func (s nodeSorter) Swap(i, j int)      { s.rs[i], s.rs[j] = s.rs[j], s.rs[i] }
func (s nodeSorter) Less(i, j int) bool { return s.less(i, j) }

// nodeOrder identifies one of the orderings supported by nodes.sort.
type nodeOrder int

const (
	flatName     nodeOrder = iota // flat value desc, then name, then cum
	flatCumName                   // flat value desc, then cum desc, then name
	cumName                       // cum value desc (lowPriority nodes last)
	nameOrder                     // function name, ascending
	fileOrder                     // file name, ascending
	addressOrder                  // address, ascending
)
+
// sort reorders the entries in a report based on the specified
// ordering criteria. The result is sorted in decreasing order for
// numeric quantities, alphabetically for text, and increasing for
// addresses. It returns an error only for an unrecognized ordering.
func (ns nodes) sort(o nodeOrder) error {
	var s nodeSorter

	switch o {
	case flatName:
		// flat desc, tie-break on name asc, then cum desc.
		s = nodeSorter{ns,
			func(i, j int) bool {
				if iv, jv := ns[i].flat, ns[j].flat; iv != jv {
					return iv > jv
				}
				if ns[i].info.prettyName() != ns[j].info.prettyName() {
					return ns[i].info.prettyName() < ns[j].info.prettyName()
				}
				iv, jv := ns[i].cum, ns[j].cum
				return iv > jv
			},
		}
	case flatCumName:
		// flat desc, tie-break on cum desc, then name asc.
		s = nodeSorter{ns,
			func(i, j int) bool {
				if iv, jv := ns[i].flat, ns[j].flat; iv != jv {
					return iv > jv
				}
				if iv, jv := ns[i].cum, ns[j].cum; iv != jv {
					return iv > jv
				}
				return ns[i].info.prettyName() < ns[j].info.prettyName()
			},
		}
	case cumName:
		// lowPriority nodes sink to the end; then cum desc, name asc, flat desc.
		s = nodeSorter{ns,
			func(i, j int) bool {
				if ns[i].info.lowPriority != ns[j].info.lowPriority {
					return ns[j].info.lowPriority
				}
				if iv, jv := ns[i].cum, ns[j].cum; iv != jv {
					return iv > jv
				}
				if ns[i].info.prettyName() != ns[j].info.prettyName() {
					return ns[i].info.prettyName() < ns[j].info.prettyName()
				}
				iv, jv := ns[i].flat, ns[j].flat
				return iv > jv
			},
		}
	case nameOrder:
		s = nodeSorter{ns,
			func(i, j int) bool {
				return ns[i].info.name < ns[j].info.name
			},
		}
	case fileOrder:
		s = nodeSorter{ns,
			func(i, j int) bool {
				return ns[i].info.file < ns[j].info.file
			},
		}
	case addressOrder:
		s = nodeSorter{ns,
			func(i, j int) bool {
				return ns[i].info.address < ns[j].info.address
			},
		}
	default:
		return fmt.Errorf("report: unrecognized sort ordering: %d", o)
	}
	sort.Sort(s)
	return nil
}
+
// edgeList is a sortable slice of edges; see Less for the ordering.
type edgeList []*edgeInfo

// sortedEdges return a slice of the edges in the map, sorted for
// visualization. The sort order is first based on the edge weight
// (higher-to-lower) and then by the node names to avoid flakiness.
func sortedEdges(edges map[*node]*edgeInfo) edgeList {
	el := make(edgeList, 0, len(edges))
	for _, w := range edges {
		el = append(el, w)
	}

	sort.Sort(el)
	return el
}

func (el edgeList) Len() int {
	return len(el)
}

// Less orders edges by decreasing weight, breaking ties by source and
// then destination node name to keep output deterministic.
func (el edgeList) Less(i, j int) bool {
	if el[i].weight != el[j].weight {
		return el[i].weight > el[j].weight
	}

	from1 := el[i].src.info.prettyName()
	from2 := el[j].src.info.prettyName()
	if from1 != from2 {
		return from1 < from2
	}

	to1 := el[i].dest.info.prettyName()
	to2 := el[j].dest.info.prettyName()

	return to1 < to2
}

func (el edgeList) Swap(i, j int) {
	el[i], el[j] = el[j], el[i]
}

// sum returns the total weight of all edges in the list.
func (el edgeList) sum() int64 {
	var ret int64
	for _, e := range el {
		ret += e.weight
	}
	return ret
}
+
+// ScaleValue reformats a value from a unit to a different unit.
+func ScaleValue(value int64, fromUnit, toUnit string) (sv float64, su string) {
+ // Avoid infinite recursion on overflow.
+ if value < 0 && -value > 0 {
+ v, u := ScaleValue(-value, fromUnit, toUnit)
+ return -v, u
+ }
+ if m, u, ok := memoryLabel(value, fromUnit, toUnit); ok {
+ return m, u
+ }
+ if t, u, ok := timeLabel(value, fromUnit, toUnit); ok {
+ return t, u
+ }
+ // Skip non-interesting units.
+ switch toUnit {
+ case "count", "sample", "unit", "minimum":
+ return float64(value), ""
+ default:
+ return float64(value), toUnit
+ }
+}
+
+func scaledValueLabel(value int64, fromUnit, toUnit string) string {
+ v, u := ScaleValue(value, fromUnit, toUnit)
+
+ sv := strings.TrimSuffix(fmt.Sprintf("%.2f", v), ".00")
+ if sv == "0" || sv == "-0" {
+ return "0"
+ }
+ return sv + u
+}
+
// memoryLabel converts a memory value between byte-based units. It
// reports ok=false when fromUnit is not a recognized memory unit.
// toUnit "minimum"/"auto" selects the largest unit that keeps the
// value at or above one.
func memoryLabel(value int64, fromUnit, toUnit string) (v float64, u string, ok bool) {
	fromUnit = strings.TrimSuffix(strings.ToLower(fromUnit), "s")
	toUnit = strings.TrimSuffix(strings.ToLower(toUnit), "s")

	// Normalize the input to bytes.
	switch fromUnit {
	case "byte", "b":
	case "kilobyte", "kb":
		value *= 1 << 10
	case "megabyte", "mb":
		value *= 1 << 20
	case "gigabyte", "gb":
		value *= 1 << 30
	default:
		return 0, "", false
	}

	if toUnit == "minimum" || toUnit == "auto" {
		switch {
		case value < 1<<10:
			toUnit = "b"
		case value < 1<<20:
			toUnit = "kb"
		case value < 1<<30:
			toUnit = "mb"
		default:
			toUnit = "gb"
		}
	}

	switch toUnit {
	case "kb", "kbyte", "kilobyte":
		return float64(value) / (1 << 10), "kB", true
	case "mb", "mbyte", "megabyte":
		return float64(value) / (1 << 20), "MB", true
	case "gb", "gbyte", "gigabyte":
		return float64(value) / (1 << 30), "GB", true
	}
	// Anything else (including "b") is reported in bytes.
	return float64(value), "B", true
}
+
// timeLabel converts a time value between units. It reports ok=false
// when fromUnit is not a recognized time unit. "cycle" values have no
// absolute duration and are returned unscaled with an empty unit.
// toUnit "minimum"/"auto" selects a unit appropriate to the magnitude.
func timeLabel(value int64, fromUnit, toUnit string) (v float64, u string, ok bool) {
	fromUnit = strings.ToLower(fromUnit)
	if len(fromUnit) > 2 {
		fromUnit = strings.TrimSuffix(fromUnit, "s")
	}

	toUnit = strings.ToLower(toUnit)
	if len(toUnit) > 2 {
		toUnit = strings.TrimSuffix(toUnit, "s")
	}

	// Normalize the input to a time.Duration.
	var d time.Duration
	switch fromUnit {
	case "nanosecond", "ns":
		d = time.Duration(value) * time.Nanosecond
	case "microsecond", "us": // "us" alias added for consistency with "ns"/"ms"
		d = time.Duration(value) * time.Microsecond
	case "millisecond", "ms":
		d = time.Duration(value) * time.Millisecond
	case "second", "sec":
		d = time.Duration(value) * time.Second
	case "cycle":
		return float64(value), "", true
	default:
		return 0, "", false
	}

	if toUnit == "minimum" || toUnit == "auto" {
		switch {
		case d < 1*time.Microsecond:
			toUnit = "ns"
		case d < 1*time.Millisecond:
			toUnit = "us"
		case d < 1*time.Second:
			toUnit = "ms"
		case d < 1*time.Minute:
			toUnit = "sec"
		case d < 1*time.Hour:
			toUnit = "min"
		case d < 24*time.Hour:
			toUnit = "hour"
		case d < 15*24*time.Hour:
			toUnit = "day"
		case d < 120*24*time.Hour:
			toUnit = "week"
		default:
			toUnit = "year"
		}
	}

	var output float64
	dd := float64(d)
	switch toUnit {
	case "ns", "nanosecond":
		output, toUnit = dd/float64(time.Nanosecond), "ns"
	case "us", "microsecond":
		output, toUnit = dd/float64(time.Microsecond), "us"
	case "ms", "millisecond":
		output, toUnit = dd/float64(time.Millisecond), "ms"
	case "min", "minute":
		output, toUnit = dd/float64(time.Minute), "mins"
	case "hour", "hr":
		output, toUnit = dd/float64(time.Hour), "hrs"
	case "day":
		output, toUnit = dd/float64(24*time.Hour), "days"
	case "week", "wk":
		output, toUnit = dd/float64(7*24*time.Hour), "wks"
	case "year", "yr":
		// Fixed: a year is 365 days (365*24h); the previous divisor
		// included a stray factor of 7 (365 weeks).
		output, toUnit = dd/float64(365*24*time.Hour), "yrs"
	default:
		// Unrecognized target units are reported in seconds.
		fallthrough
	case "sec", "second", "s":
		output, toUnit = dd/float64(time.Second), "s"
	}
	return output, toUnit, true
}
+
+// prettyName determines the printable name to be used for a node.
+func (info *nodeInfo) prettyName() string {
+ var name string
+ if info.address != 0 {
+ name = fmt.Sprintf("%016x", info.address)
+ }
+
+ if info.name != "" {
+ name = name + " " + info.name
+ }
+
+ if info.file != "" {
+ name += " " + trimPath(info.file)
+ if info.lineno != 0 {
+ name += fmt.Sprintf(":%d", info.lineno)
+ }
+ }
+
+ if info.inline {
+ name = name + " (inline)"
+ }
+
+ if name = strings.TrimSpace(name); name == "" && info.objfile != "" {
+ name = "[" + filepath.Base(info.objfile) + "]"
+ }
+ return name
+}
+
+// New builds a new report indexing the sample values interpreting the
+// samples with the provided function.
+func New(prof *profile.Profile, options Options, value func(s *profile.Sample) int64, unit string) *Report {
+ o := &options
+ if o.SampleUnit == "" {
+ o.SampleUnit = unit
+ }
+ format := func(v int64) string {
+ if r := o.Ratio; r > 0 && r != 1 {
+ fv := float64(v) * r
+ v = int64(fv)
+ }
+ return scaledValueLabel(v, o.SampleUnit, o.OutputUnit)
+ }
+ return &Report{prof, computeTotal(prof, value), o, value, format}
+}
+
+// NewDefault builds a new report indexing the sample values with the
+// last value available.
+func NewDefault(prof *profile.Profile, options Options) *Report {
+ index := len(prof.SampleType) - 1
+ o := &options
+ if o.SampleUnit == "" {
+ o.SampleUnit = strings.ToLower(prof.SampleType[index].Unit)
+ }
+ value := func(s *profile.Sample) int64 {
+ return s.Value[index]
+ }
+ format := func(v int64) string {
+ if r := o.Ratio; r > 0 && r != 1 {
+ fv := float64(v) * r
+ v = int64(fv)
+ }
+ return scaledValueLabel(v, o.SampleUnit, o.OutputUnit)
+ }
+ return &Report{prof, computeTotal(prof, value), o, value, format}
+}
+
+func computeTotal(prof *profile.Profile, value func(s *profile.Sample) int64) int64 {
+ var ret int64
+ for _, sample := range prof.Sample {
+ ret += value(sample)
+ }
+ return ret
+}
+
// Report contains the data and associated routines to extract a
// report from a profile.
type Report struct {
	prof        *profile.Profile            // profile being reported on
	total       int64                       // sum of sampleValue over all samples (computeTotal)
	options     *Options                    // report formatting/selection options
	sampleValue func(*profile.Sample) int64 // extracts the indexed value from a sample
	formatValue func(int64) string          // renders a value in the report's output unit
}
diff --git a/src/cmd/pprof/internal/report/source.go b/src/cmd/pprof/internal/report/source.go
new file mode 100644
index 0000000000..7ab7e3861f
--- /dev/null
+++ b/src/cmd/pprof/internal/report/source.go
@@ -0,0 +1,454 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package report
+
+// This file contains routines related to the generation of annotated
+// source listings.
+
+import (
+ "bufio"
+ "fmt"
+ "html/template"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+
+ "cmd/pprof/internal/plugin"
+)
+
// printSource prints an annotated source listing, include all
// functions with samples that match the regexp rpt.options.symbol.
// The sources are sorted by function name and then by filename to
// eliminate potential nondeterminism.
func printSource(w io.Writer, rpt *Report) error {
	o := rpt.options
	g, err := newGraph(rpt)
	if err != nil {
		return err
	}

	// Identify all the functions that match the regexp provided.
	// Group nodes for each matching function.
	var functions nodes
	functionNodes := make(map[string]nodes)
	for _, n := range g.ns {
		if !o.Symbol.MatchString(n.info.name) {
			continue
		}
		if functionNodes[n.info.name] == nil {
			functions = append(functions, n)
		}
		functionNodes[n.info.name] = append(functionNodes[n.info.name], n)
	}
	functions.sort(nameOrder)

	fmt.Fprintf(w, "Total: %s\n", rpt.formatValue(rpt.total))
	for _, fn := range functions {
		name := fn.info.name

		// Identify all the source files associated to this function.
		// Group nodes for each source file.
		var sourceFiles nodes
		fileNodes := make(map[string]nodes)
		for _, n := range functionNodes[name] {
			if n.info.file == "" {
				continue
			}
			if fileNodes[n.info.file] == nil {
				sourceFiles = append(sourceFiles, n)
			}
			fileNodes[n.info.file] = append(fileNodes[n.info.file], n)
		}

		if len(sourceFiles) == 0 {
			// NOTE(review): this goes to stdout, not w — presumably an
			// intentional user-facing warning, but verify against callers.
			fmt.Printf("No source information for %s\n", name)
			continue
		}

		sourceFiles.sort(fileOrder)

		// Print each file associated with this function.
		for _, fl := range sourceFiles {
			filename := fl.info.file
			fns := fileNodes[filename]
			flatSum, cumSum := sumNodes(fns)

			// The header is printed even if the source cannot be read;
			// the error is reported below it.
			fnodes, path, err := getFunctionSource(name, filename, fns, 0, 0)
			fmt.Fprintf(w, "ROUTINE ======================== %s in %s\n", name, path)
			fmt.Fprintf(w, "%10s %10s (flat, cum) %s of Total\n",
				rpt.formatValue(flatSum), rpt.formatValue(cumSum),
				percentage(cumSum, rpt.total))

			if err != nil {
				fmt.Fprintf(w, " Error: %v\n", err)
				continue
			}

			for _, fn := range fnodes {
				fmt.Fprintf(w, "%10s %10s %6d:%s\n", valueOrDot(fn.flat, rpt), valueOrDot(fn.cum, rpt), fn.info.lineno, fn.info.name)
			}
		}
	}
	return nil
}
+
// printWebSource prints an annotated source listing, include all
// functions with samples that match the regexp rpt.options.symbol.
// Output is HTML; assembly is attached per source line where available.
func printWebSource(w io.Writer, rpt *Report, obj plugin.ObjTool) error {
	o := rpt.options
	g, err := newGraph(rpt)
	if err != nil {
		return err
	}

	// If the regexp source can be parsed as an address, also match
	// functions that land on that address.
	var address *uint64
	if hex, err := strconv.ParseUint(o.Symbol.String(), 0, 64); err == nil {
		address = &hex
	}

	// Extract interesting symbols from binary files in the profile and
	// classify samples per symbol.
	symbols := symbolsFromBinaries(rpt.prof, g, o.Symbol, address, obj)
	symNodes := nodesPerSymbol(g.ns, symbols)

	// Sort symbols for printing.
	var syms objSymbols
	for s := range symNodes {
		syms = append(syms, s)
	}
	sort.Sort(syms)

	if len(syms) == 0 {
		return fmt.Errorf("no samples found on routines matching: %s", o.Symbol.String())
	}

	printHeader(w, rpt)
	for _, s := range syms {
		name := s.sym.Name[0]
		// Identify sources associated to a symbol by examining
		// symbol samples. Classify samples per source file.
		var sourceFiles nodes
		fileNodes := make(map[string]nodes)
		for _, n := range symNodes[s] {
			if n.info.file == "" {
				continue
			}
			if fileNodes[n.info.file] == nil {
				sourceFiles = append(sourceFiles, n)
			}
			fileNodes[n.info.file] = append(fileNodes[n.info.file], n)
		}

		if len(sourceFiles) == 0 {
			// NOTE(review): this goes to stdout, not w — presumably an
			// intentional user-facing warning, but verify against callers.
			fmt.Printf("No source information for %s\n", name)
			continue
		}

		sourceFiles.sort(fileOrder)

		// Print each file associated with this function.
		for _, fl := range sourceFiles {
			filename := fl.info.file
			fns := fileNodes[filename]

			asm := assemblyPerSourceLine(symbols, fns, filename, obj)
			start, end := sourceCoordinates(asm)

			// Fall back to a synthesized listing when the source file
			// cannot be read.
			fnodes, path, err := getFunctionSource(name, filename, fns, start, end)
			if err != nil {
				fnodes, path = getMissingFunctionSource(filename, asm, start, end)
			}

			flatSum, cumSum := sumNodes(fnodes)
			printFunctionHeader(w, name, path, flatSum, cumSum, rpt)
			for _, fn := range fnodes {
				printFunctionSourceLine(w, fn, asm[fn.info.lineno], rpt)
			}
			printFunctionClosing(w)
		}
	}
	printPageClosing(w)
	return nil
}
+
+// sourceCoordinates returns the lowest and highest line numbers from
+// a set of assembly statements.
+func sourceCoordinates(asm map[int]nodes) (start, end int) {
+ for l := range asm {
+ if start == 0 || l < start {
+ start = l
+ }
+ if end == 0 || l > end {
+ end = l
+ }
+ }
+ return start, end
+}
+
// assemblyPerSourceLine disassembles the binary containing a symbol
// and classifies the assembly instructions according to its
// corresponding source line, annotating them with a set of samples.
// It returns an empty map when no matching symbol is found or the
// disassembly fails.
func assemblyPerSourceLine(objSyms []*objSymbol, rs nodes, src string, obj plugin.ObjTool) map[int]nodes {
	assembly := make(map[int]nodes)
	// Identify symbol to use for this collection of samples.
	o := findMatchingSymbol(objSyms, rs)
	if o == nil {
		return assembly
	}

	// Extract assembly for matched symbol
	insns, err := obj.Disasm(o.sym.File, o.sym.Start, o.sym.End)
	if err != nil {
		return assembly
	}

	srcBase := filepath.Base(src)
	anodes := annotateAssembly(insns, rs, o.base)
	var lineno = 0
	for _, an := range anodes {
		// Instructions attributed to other files keep the last line
		// number seen for src, so inlined code stays grouped with the
		// surrounding source line.
		if filepath.Base(an.info.file) == srcBase {
			lineno = an.info.lineno
		}
		if lineno != 0 {
			assembly[lineno] = append(assembly[lineno], an)
		}
	}

	return assembly
}
+
+// findMatchingSymbol looks for the symbol that corresponds to a set
+// of samples, by comparing their addresses.
+func findMatchingSymbol(objSyms []*objSymbol, ns nodes) *objSymbol {
+ for _, n := range ns {
+ for _, o := range objSyms {
+ if filepath.Base(o.sym.File) == n.info.objfile &&
+ o.sym.Start <= n.info.address-o.base &&
+ n.info.address-o.base <= o.sym.End {
+ return o
+ }
+ }
+ }
+ return nil
+}
+
// printHeader prints the page header for a weblist report: the shared
// HTML/CSS/JS prologue followed by the legend and profile total.
func printHeader(w io.Writer, rpt *Report) {
	fmt.Fprintln(w, weblistPageHeader)

	// HTML-escape each legend line before joining them.
	var labels []string
	for _, l := range legendLabels(rpt) {
		labels = append(labels, template.HTMLEscapeString(l))
	}

	fmt.Fprintf(w, `<div class="legend">%s<br>Total: %s</div>`,
		strings.Join(labels, "<br>\n"),
		rpt.formatValue(rpt.total),
	)
}
+
// printFunctionHeader prints a function header for a weblist report:
// the function name, source path, and its flat/cum totals. The <pre>
// opened here is closed by printFunctionClosing.
func printFunctionHeader(w io.Writer, name, path string, flatSum, cumSum int64, rpt *Report) {
	fmt.Fprintf(w, `<h1>%s</h1>%s
<pre onClick="pprof_toggle_asm(event)">
  Total:  %10s %10s (flat, cum) %s
`,
		template.HTMLEscapeString(name), template.HTMLEscapeString(path),
		rpt.formatValue(flatSum), rpt.formatValue(cumSum),
		percentage(cumSum, rpt.total))
}
+
// printFunctionSourceLine prints a source line and the corresponding assembly.
// Lines with assembly get the "deadsrc" class and a hidden "asm" span that
// the page's pprof_toggle_asm script can reveal; lines without get "nop".
func printFunctionSourceLine(w io.Writer, fn *node, assembly nodes, rpt *Report) {
	if len(assembly) == 0 {
		fmt.Fprintf(w,
			"<span class=line> %6d</span> <span class=nop>  %10s %10s %s </span>\n",
			fn.info.lineno,
			valueOrDot(fn.flat, rpt), valueOrDot(fn.cum, rpt),
			template.HTMLEscapeString(fn.info.name))
		return
	}

	fmt.Fprintf(w,
		"<span class=line> %6d</span> <span class=deadsrc>  %10s %10s %s </span>",
		fn.info.lineno,
		valueOrDot(fn.flat, rpt), valueOrDot(fn.cum, rpt),
		template.HTMLEscapeString(fn.info.name))
	fmt.Fprint(w, "<span class=asm>")
	for _, an := range assembly {
		var fileline string
		class := "disasmloc"
		if an.info.file != "" {
			fileline = fmt.Sprintf("%s:%d", template.HTMLEscapeString(an.info.file), an.info.lineno)
			if an.info.lineno != fn.info.lineno {
				// Instruction came from a different source line
				// (e.g. inlined code); de-emphasize its location.
				class = "unimportant"
			}
		}
		fmt.Fprintf(w, " %8s %10s %10s %8x: %-48s <span class=%s>%s</span>\n", "",
			valueOrDot(an.flat, rpt), valueOrDot(an.cum, rpt),
			an.info.address,
			template.HTMLEscapeString(an.info.name),
			class,
			template.HTMLEscapeString(fileline))
	}
	fmt.Fprintln(w, "</span>")
}
+
// printFunctionClosing prints the end of a function in a weblist report,
// closing the <pre> opened by printFunctionHeader.
func printFunctionClosing(w io.Writer) {
	fmt.Fprintln(w, "</pre>")
}

// printPageClosing prints the end of the page in a weblist report,
// closing the document opened by printHeader.
func printPageClosing(w io.Writer) {
	fmt.Fprintln(w, weblistPageClosing)
}
+
// getFunctionSource collects the sources of a function from a source
// file and annotates it with the samples in fns. Returns the sources
// as nodes, using the info.name field to hold the source code.
// start/end, when nonzero, seed the line range; the range is widened
// by a margin and by the lines covered by the samples themselves.
func getFunctionSource(fun, file string, fns nodes, start, end int) (nodes, string, error) {
	f, file, err := adjustSourcePath(file)
	if err != nil {
		return nil, file, err
	}

	lineNodes := make(map[int]nodes)

	// Collect source coordinates from profile.
	const margin = 5 // Lines before first/after last sample.
	if start == 0 {
		if fns[0].info.startLine != 0 {
			start = fns[0].info.startLine
		} else {
			start = fns[0].info.lineno - margin
		}
	} else {
		start -= margin
	}
	if end == 0 {
		end = fns[0].info.lineno
	}
	end += margin
	// Widen [start, end] to cover every sampled line (plus margin) and
	// bucket the sample nodes by line number.
	for _, n := range fns {
		lineno := n.info.lineno
		nodeStart := n.info.startLine
		if nodeStart == 0 {
			nodeStart = lineno - margin
		}
		nodeEnd := lineno + margin
		if nodeStart < start {
			start = nodeStart
		} else if nodeEnd > end {
			end = nodeEnd
		}
		lineNodes[lineno] = append(lineNodes[lineno], n)
	}

	// Read lines start..end, attaching the flat/cum sums per line.
	var src nodes
	buf := bufio.NewReader(f)
	lineno := 1
	for {
		line, err := buf.ReadString('\n')
		if err != nil {
			if err != io.EOF {
				return nil, file, err
			}
			if line == "" {
				// end was at or past EOF; that's okay
				break
			}
		}
		if lineno >= start {
			flat, cum := sumNodes(lineNodes[lineno])

			src = append(src, &node{
				info: nodeInfo{
					name:   strings.TrimRight(line, "\n"),
					lineno: lineno,
				},
				flat: flat,
				cum:  cum,
			})
		}
		lineno++
		if lineno > end {
			break
		}
	}
	return src, file, nil
}
+
+// getMissingFunctionSource creates a dummy function body to point to
+// the source file and annotates it with the samples in asm.
+func getMissingFunctionSource(filename string, asm map[int]nodes, start, end int) (nodes, string) {
+ var fnodes nodes
+ for i := start; i <= end; i++ {
+ lrs := asm[i]
+ if len(lrs) == 0 {
+ continue
+ }
+ flat, cum := sumNodes(lrs)
+ fnodes = append(fnodes, &node{
+ info: nodeInfo{
+ name: "???",
+ lineno: i,
+ },
+ flat: flat,
+ cum: cum,
+ })
+ }
+ return fnodes, filename
+}
+
+// adjustSourcePath adjusts the path for a source file by trimming
+// known prefixes and searching for the file on all parents of the
+// current working dir.
+func adjustSourcePath(path string) (*os.File, string, error) {
+ path = trimPath(path)
+ f, err := os.Open(path)
+ if err == nil {
+ return f, path, nil
+ }
+
+ if dir, wderr := os.Getwd(); wderr == nil {
+ for {
+ parent := filepath.Dir(dir)
+ if parent == dir {
+ break
+ }
+ if f, err := os.Open(filepath.Join(parent, path)); err == nil {
+ return f, filepath.Join(parent, path), nil
+ }
+
+ dir = parent
+ }
+ }
+
+ return nil, path, err
+}
+
// trimPath cleans up a path by removing prefixes that are commonly
// found on profiles. The prefix comparison is done on the
// slash-separated form; untouched paths are returned unchanged.
func trimPath(path string) string {
	slashed := filepath.ToSlash(path)
	// Order matters: the "./" variant must be checked first so the
	// "./" is stripped along with the prefix.
	for _, prefix := range []string{"/proc/self/cwd/./", "/proc/self/cwd/"} {
		if strings.HasPrefix(slashed, prefix) {
			return filepath.FromSlash(strings.TrimPrefix(slashed, prefix))
		}
	}
	return path
}
diff --git a/src/cmd/pprof/internal/report/source_html.go b/src/cmd/pprof/internal/report/source_html.go
new file mode 100644
index 0000000000..267fabdc4b
--- /dev/null
+++ b/src/cmd/pprof/internal/report/source_html.go
@@ -0,0 +1,77 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package report
+
// weblistPageHeader is the HTML prologue for a weblist report page:
// document head, CSS classes used by printFunctionSourceLine
// (line/nop/deadsrc/asm/...), and the pprof_toggle_asm handler that
// shows or hides the assembly span following a clicked source line.
const weblistPageHeader = `
<!DOCTYPE html>
<html>
<head>
<title>Pprof listing</title>
<style type="text/css">
body {
font-family: sans-serif;
}
h1 {
  font-size: 1.5em;
  margin-bottom: 4px;
}
.legend {
  font-size: 1.25em;
}
.line {
color: #aaaaaa;
}
.nop {
color: #aaaaaa;
}
.unimportant {
color: #cccccc;
}
.disasmloc {
color: #000000;
}
.deadsrc {
cursor: pointer;
}
.deadsrc:hover {
background-color: #eeeeee;
}
.livesrc {
color: #0000ff;
cursor: pointer;
}
.livesrc:hover {
background-color: #eeeeee;
}
.asm {
color: #008800;
display: none;
}
</style>
<script type="text/javascript">
function pprof_toggle_asm(e) {
  var target;
  if (!e) e = window.event;
  if (e.target) target = e.target;
  else if (e.srcElement) target = e.srcElement;

  if (target) {
    var asm = target.nextSibling;
    if (asm && asm.className == "asm") {
      asm.style.display = (asm.style.display == "block" ? "" : "block");
      e.preventDefault();
      return false;
    }
  }
}
</script>
</head>
<body>
`

// weblistPageClosing terminates the HTML document started by
// weblistPageHeader.
const weblistPageClosing = `
</body>
</html>
`
diff --git a/src/cmd/pprof/internal/svg/svg.go b/src/cmd/pprof/internal/svg/svg.go
new file mode 100644
index 0000000000..04f6ff1870
--- /dev/null
+++ b/src/cmd/pprof/internal/svg/svg.go
@@ -0,0 +1,71 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package svg provides tools related to handling of SVG files
+package svg
+
+import (
+ "bytes"
+ "regexp"
+ "strings"
+)
+
// Patterns locating the parts of DOT's SVG output that Massage
// rewrites: the sized <svg> opening tag, the top-level graph group,
// and the document close.
var (
	viewBox  = regexp.MustCompile(`<svg\s*width="[^"]+"\s*height="[^"]+"\s*viewBox="[^"]+"`)
	graphId  = regexp.MustCompile(`<g id="graph\d"`)
	svgClose = regexp.MustCompile(`</svg>`)
)
+
+// Massage enhances the SVG output from DOT to provide better
+// panning inside a web browser. It uses the SVGPan library, which is
+// included directly.
+func Massage(in bytes.Buffer) string {
+ svg := string(in.Bytes())
+
+ // Work around for dot bug which misses quoting some ampersands,
+ // resulting on unparsable SVG.
+ svg = strings.Replace(svg, "&;", "&amp;;", -1)
+
+ //Dot's SVG output is
+ //
+ // <svg width="___" height="___"
+ // viewBox="___" xmlns=...>
+ // <g id="graph0" transform="...">
+ // ...
+ // </g>
+ // </svg>
+ //
+ // Change it to
+ //
+ // <svg width="100%" height="100%"
+ // xmlns=...>
+ // <script>...</script>
+ // <g id="viewport" transform="translate(0,0)">
+ // <g id="graph0" transform="...">
+ // ...
+ // </g>
+ // </g>
+ // </svg>
+
+ if loc := viewBox.FindStringIndex(svg); loc != nil {
+ svg = svg[:loc[0]] +
+ `<svg width="100%" height="100%"` +
+ svg[loc[1]:]
+ }
+
+ if loc := graphId.FindStringIndex(svg); loc != nil {
+ svg = svg[:loc[0]] +
+ `<script type="text/ecmascript"><![CDATA[` + svgPanJS + `]]></script>` +
+ `<g id="viewport" transform="scale(0.5,0.5) translate(0,0)">` +
+ svg[loc[0]:]
+ }
+
+ if loc := svgClose.FindStringIndex(svg); loc != nil {
+ svg = svg[:loc[0]] +
+ `</g>` +
+ svg[loc[0]:]
+ }
+
+ return svg
+}
diff --git a/src/cmd/pprof/internal/svg/svgpan.go b/src/cmd/pprof/internal/svg/svgpan.go
new file mode 100644
index 0000000000..4975b103e3
--- /dev/null
+++ b/src/cmd/pprof/internal/svg/svgpan.go
@@ -0,0 +1,291 @@
+// SVG pan and zoom library.
+// See copyright notice in string constant below.
+
+package svg
+
+// https://www.cyberz.org/projects/SVGPan/SVGPan.js
+
+// svgPanJS is the SVGPan JavaScript library (v1.2.1, BSD-licensed, by
+// Andrea Leofreddi), embedded verbatim as a Go string constant. Massage
+// (svg.go) injects it into dot-generated SVG inside a <script> element to
+// provide mouse pan/zoom. The string below — including its license header
+// and JavaScript comments — is runtime data and must be kept byte-for-byte.
+const svgPanJS = `
+/**
+ * SVGPan library 1.2.1
+ * ======================
+ *
+ * Given an unique existing element with id "viewport" (or when missing, the first g
+ * element), including the the library into any SVG adds the following capabilities:
+ *
+ *  - Mouse panning
+ *  - Mouse zooming (using the wheel)
+ *  - Object dragging
+ *
+ * You can configure the behaviour of the pan/zoom/drag with the variables
+ * listed in the CONFIGURATION section of this file.
+ *
+ * Known issues:
+ *
+ *  - Zooming (while panning) on Safari has still some issues
+ *
+ * Releases:
+ *
+ * 1.2.1, Mon Jul  4 00:33:18 CEST 2011, Andrea Leofreddi
+ *	- Fixed a regression with mouse wheel (now working on Firefox 5)
+ *	- Working with viewBox attribute (#4)
+ *	- Added "use strict;" and fixed resulting warnings (#5)
+ *	- Added configuration variables, dragging is disabled by default (#3)
+ *
+ * 1.2, Sat Mar 20 08:42:50 GMT 2010, Zeng Xiaohui
+ *	Fixed a bug with browser mouse handler interaction
+ *
+ * 1.1, Wed Feb  3 17:39:33 GMT 2010, Zeng Xiaohui
+ *	Updated the zoom code to support the mouse wheel on Safari/Chrome
+ *
+ * 1.0, Andrea Leofreddi
+ *	First release
+ *
+ * This code is licensed under the following BSD license:
+ *
+ * Copyright 2009-2010 Andrea Leofreddi <a.leofreddi@itcharm.com>. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification, are
+ * permitted provided that the following conditions are met:
+ *
+ *    1. Redistributions of source code must retain the above copyright notice, this list of
+ *       conditions and the following disclaimer.
+ *
+ *    2. Redistributions in binary form must reproduce the above copyright notice, this list
+ *       of conditions and the following disclaimer in the documentation and/or other materials
+ *       provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Andrea Leofreddi ` + "``AS IS''" + ` AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Andrea Leofreddi OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are those of the
+ * authors and should not be interpreted as representing official policies, either expressed
+ * or implied, of Andrea Leofreddi.
+ */
+
+"use strict";
+
+/// CONFIGURATION
+/// ====>
+
+var enablePan = 1; // 1 or 0: enable or disable panning (default enabled)
+var enableZoom = 1; // 1 or 0: enable or disable zooming (default enabled)
+var enableDrag = 0; // 1 or 0: enable or disable dragging (default disabled)
+
+/// <====
+/// END OF CONFIGURATION
+
+var root = document.documentElement;
+
+var state = 'none', svgRoot, stateTarget, stateOrigin, stateTf;
+
+setupHandlers(root);
+
+/**
+ * Register handlers
+ */
+function setupHandlers(root){
+	setAttributes(root, {
+		"onmouseup" : "handleMouseUp(evt)",
+		"onmousedown" : "handleMouseDown(evt)",
+		"onmousemove" : "handleMouseMove(evt)",
+		//"onmouseout" : "handleMouseUp(evt)", // Decomment this to stop the pan functionality when dragging out of the SVG element
+	});
+
+	if(navigator.userAgent.toLowerCase().indexOf('webkit') >= 0)
+		window.addEventListener('mousewheel', handleMouseWheel, false); // Chrome/Safari
+	else
+		window.addEventListener('DOMMouseScroll', handleMouseWheel, false); // Others
+}
+
+/**
+ * Retrieves the root element for SVG manipulation. The element is then cached into the svgRoot global variable.
+ */
+function getRoot(root) {
+	if(typeof(svgRoot) == "undefined") {
+		var g = null;
+
+		g = root.getElementById("viewport");
+
+		if(g == null)
+			g = root.getElementsByTagName('g')[0];
+
+		if(g == null)
+			alert('Unable to obtain SVG root element');
+
+		setCTM(g, g.getCTM());
+
+		g.removeAttribute("viewBox");
+
+		svgRoot = g;
+	}
+
+	return svgRoot;
+}
+
+/**
+ * Instance an SVGPoint object with given event coordinates.
+ */
+function getEventPoint(evt) {
+	var p = root.createSVGPoint();
+
+	p.x = evt.clientX;
+	p.y = evt.clientY;
+
+	return p;
+}
+
+/**
+ * Sets the current transform matrix of an element.
+ */
+function setCTM(element, matrix) {
+	var s = "matrix(" + matrix.a + "," + matrix.b + "," + matrix.c + "," + matrix.d + "," + matrix.e + "," + matrix.f + ")";
+
+	element.setAttribute("transform", s);
+}
+
+/**
+ * Dumps a matrix to a string (useful for debug).
+ */
+function dumpMatrix(matrix) {
+	var s = "[ " + matrix.a + ", " + matrix.c + ", " + matrix.e + "\n  " + matrix.b + ", " + matrix.d + ", " + matrix.f + "\n  0, 0, 1 ]";
+
+	return s;
+}
+
+/**
+ * Sets attributes of an element.
+ */
+function setAttributes(element, attributes){
+	for (var i in attributes)
+		element.setAttributeNS(null, i, attributes[i]);
+}
+
+/**
+ * Handle mouse wheel event.
+ */
+function handleMouseWheel(evt) {
+	if(!enableZoom)
+		return;
+
+	if(evt.preventDefault)
+		evt.preventDefault();
+
+	evt.returnValue = false;
+
+	var svgDoc = evt.target.ownerDocument;
+
+	var delta;
+
+	if(evt.wheelDelta)
+		delta = evt.wheelDelta / 3600; // Chrome/Safari
+	else
+		delta = evt.detail / -90; // Mozilla
+
+	var z = 1 + delta; // Zoom factor: 0.9/1.1
+
+	var g = getRoot(svgDoc);
+
+	var p = getEventPoint(evt);
+
+	p = p.matrixTransform(g.getCTM().inverse());
+
+	// Compute new scale matrix in current mouse position
+	var k = root.createSVGMatrix().translate(p.x, p.y).scale(z).translate(-p.x, -p.y);
+
+	setCTM(g, g.getCTM().multiply(k));
+
+	if(typeof(stateTf) == "undefined")
+		stateTf = g.getCTM().inverse();
+
+	stateTf = stateTf.multiply(k.inverse());
+}
+
+/**
+ * Handle mouse move event.
+ */
+function handleMouseMove(evt) {
+	if(evt.preventDefault)
+		evt.preventDefault();
+
+	evt.returnValue = false;
+
+	var svgDoc = evt.target.ownerDocument;
+
+	var g = getRoot(svgDoc);
+
+	if(state == 'pan' && enablePan) {
+		// Pan mode
+		var p = getEventPoint(evt).matrixTransform(stateTf);
+
+		setCTM(g, stateTf.inverse().translate(p.x - stateOrigin.x, p.y - stateOrigin.y));
+	} else if(state == 'drag' && enableDrag) {
+		// Drag mode
+		var p = getEventPoint(evt).matrixTransform(g.getCTM().inverse());
+
+		setCTM(stateTarget, root.createSVGMatrix().translate(p.x - stateOrigin.x, p.y - stateOrigin.y).multiply(g.getCTM().inverse()).multiply(stateTarget.getCTM()));
+
+		stateOrigin = p;
+	}
+}
+
+/**
+ * Handle click event.
+ */
+function handleMouseDown(evt) {
+	if(evt.preventDefault)
+		evt.preventDefault();
+
+	evt.returnValue = false;
+
+	var svgDoc = evt.target.ownerDocument;
+
+	var g = getRoot(svgDoc);
+
+	if(
+		evt.target.tagName == "svg"
+		|| !enableDrag // Pan anyway when drag is disabled and the user clicked on an element
+	) {
+		// Pan mode
+		state = 'pan';
+
+		stateTf = g.getCTM().inverse();
+
+		stateOrigin = getEventPoint(evt).matrixTransform(stateTf);
+	} else {
+		// Drag mode
+		state = 'drag';
+
+		stateTarget = evt.target;
+
+		stateTf = g.getCTM().inverse();
+
+		stateOrigin = getEventPoint(evt).matrixTransform(stateTf);
+	}
+}
+
+/**
+ * Handle mouse button release event.
+ */
+function handleMouseUp(evt) {
+	if(evt.preventDefault)
+		evt.preventDefault();
+
+	evt.returnValue = false;
+
+	var svgDoc = evt.target.ownerDocument;
+
+	if(state == 'pan' || state == 'drag') {
+		// Quit pan mode
+		state = '';
+	}
+}
+
+`
diff --git a/src/cmd/pprof/internal/symbolizer/symbolizer.go b/src/cmd/pprof/internal/symbolizer/symbolizer.go
new file mode 100644
index 0000000000..06a3976838
--- /dev/null
+++ b/src/cmd/pprof/internal/symbolizer/symbolizer.go
@@ -0,0 +1,195 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package symbolizer provides a routine to populate a profile with
+// symbol, file and line number information. It relies on the
+// addr2liner and demangler packages to do the actual work.
+package symbolizer
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "cmd/pprof/internal/plugin"
+ "internal/pprof/profile"
+)
+
+// Symbolize adds symbol and line number information to all locations
+// in a profile. mode enables some options to control
+// symbolization. Currently only recognizes "force", which causes it
+// to overwrite any existing data. Returns an error if the profile has
+// no mappings or the mapping table cannot be built; per-location
+// symbolization failures are skipped silently.
+func Symbolize(mode string, prof *profile.Profile, obj plugin.ObjTool, ui plugin.UI) error {
+	force := false
+	// Disable some mechanisms based on mode string.
+	for _, o := range strings.Split(strings.ToLower(mode), ":") {
+		switch o {
+		case "force":
+			force = true
+		default:
+			// Unrecognized options are silently ignored.
+		}
+	}
+
+	if len(prof.Mapping) == 0 {
+		return fmt.Errorf("no known mappings")
+	}
+
+	mt, err := newMapping(prof, obj, ui, force)
+	if err != nil {
+		return err
+	}
+	defer mt.close()
+
+	// functions dedupes Function records by value so identical frames
+	// across locations share a single *profile.Function.
+	functions := make(map[profile.Function]*profile.Function)
+	for _, l := range mt.prof.Location {
+		m := l.Mapping
+		segment := mt.segments[m]
+		if segment == nil {
+			// Nothing to do
+			continue
+		}
+
+		stack, err := segment.SourceLine(l.Address)
+		if err != nil || len(stack) == 0 {
+			// No answers from addr2line
+			continue
+		}
+
+		l.Line = make([]profile.Line, len(stack))
+		for i, frame := range stack {
+			if frame.Func != "" {
+				m.HasFunctions = true
+			}
+			if frame.File != "" {
+				m.HasFilenames = true
+			}
+			if frame.Line != 0 {
+				m.HasLineNumbers = true
+			}
+			f := &profile.Function{
+				Name:       frame.Func,
+				SystemName: frame.Func,
+				Filename:   frame.File,
+			}
+			if fp := functions[*f]; fp != nil {
+				f = fp
+			} else {
+				functions[*f] = f
+				// IDs are 1-based, assigned in insertion order.
+				f.ID = uint64(len(mt.prof.Function)) + 1
+				mt.prof.Function = append(mt.prof.Function, f)
+			}
+			l.Line[i] = profile.Line{
+				Function: f,
+				Line:     int64(frame.Line),
+			}
+		}
+
+		// NOTE(review): this condition is always true here — locations
+		// with an empty stack were skipped above — so HasInlineFrames is
+		// set for every symbolized mapping even when each location has
+		// only a single frame (i.e. no inlining). `len(stack) > 1` looks
+		// like the intent; TODO confirm before changing.
+		if len(stack) > 0 {
+			m.HasInlineFrames = true
+		}
+	}
+	return nil
+}
+
+// newMapping creates a mappingTable for a profile. For every mapping
+// referenced by at least one location it opens the corresponding object
+// file and records it in the table. When force is false, mappings that
+// already carry symbol information are skipped. Failures to open or
+// match an object file are reported via ui and the mapping is skipped.
+func newMapping(prof *profile.Profile, obj plugin.ObjTool, ui plugin.UI, force bool) (*mappingTable, error) {
+	mt := &mappingTable{
+		prof:     prof,
+		segments: make(map[*profile.Mapping]plugin.ObjFile),
+	}
+
+	// Identify used mappings
+	mappings := make(map[*profile.Mapping]bool)
+	for _, l := range prof.Location {
+		mappings[l.Mapping] = true
+	}
+
+	for _, m := range prof.Mapping {
+		if !mappings[m] {
+			continue
+		}
+		// Do not attempt to re-symbolize a mapping that has already been symbolized.
+		if !force && (m.HasFunctions || m.HasFilenames || m.HasLineNumbers) {
+			continue
+		}
+
+		f, err := locateFile(obj, m.File, m.BuildID, m.Start)
+		if err != nil {
+			// Symbolization is best effort per mapping: report and keep going.
+			ui.PrintErr("Local symbolization failed for ", filepath.Base(m.File), ": ", err)
+			// Move on to other mappings
+			continue
+		}
+
+		if fid := f.BuildID(); m.BuildID != "" && fid != "" && fid != m.BuildID {
+			// Build ID mismatch - ignore.
+			f.Close()
+			continue
+		}
+
+		mt.segments[m] = f
+	}
+
+	return mt, nil
+}
+
+// locateFile opens a local file for symbolization on the search path
+// at $PPROF_BINARY_PATH. Looks inside these directories for files
+// named $BUILDID/$BASENAME and $BASENAME (if build id is available).
+// If nothing on the search path matches, it falls back to opening the
+// original file path and, when a build id is known, verifies it.
+func locateFile(obj plugin.ObjTool, file, buildID string, start uint64) (plugin.ObjFile, error) {
+	// Construct search path to examine
+	searchPath := os.Getenv("PPROF_BINARY_PATH")
+	if searchPath == "" {
+		// Use $HOME/pprof/binaries as default directory for local symbolization binaries
+		searchPath = filepath.Join(os.Getenv("HOME"), "pprof", "binaries")
+	}
+
+	// Collect names to search: {buildid/basename, basename}
+	var fileNames []string
+	if baseName := filepath.Base(file); buildID != "" {
+		fileNames = []string{filepath.Join(buildID, baseName), baseName}
+	} else {
+		fileNames = []string{baseName}
+	}
+	for _, path := range filepath.SplitList(searchPath) {
+		for nameIndex, name := range fileNames {
+			// Note: this inner "file" shadows the parameter on purpose.
+			file := filepath.Join(path, name)
+			if f, err := obj.Open(file, start); err == nil {
+				fileBuildID := f.BuildID()
+				if buildID == "" || buildID == fileBuildID {
+					return f, nil
+				}
+				f.Close()
+				if nameIndex == 0 {
+					// If this is the first name, the path includes the build id. Report inconsistency.
+					return nil, fmt.Errorf("found file %s with inconsistent build id %s", file, fileBuildID)
+				}
+			}
+		}
+	}
+	// Try original file name
+	f, err := obj.Open(file, start)
+	if err == nil && buildID != "" {
+		if fileBuildID := f.BuildID(); fileBuildID != "" && fileBuildID != buildID {
+			// Mismatched build IDs, ignore
+			f.Close()
+			return nil, fmt.Errorf("mismatched build ids %s != %s", fileBuildID, buildID)
+		}
+	}
+	return f, err
+}
+
+// mappingTable contains the mechanisms for symbolization of a
+// profile: for each profile mapping, the open object file used to
+// resolve addresses within it.
+type mappingTable struct {
+	prof     *profile.Profile
+	segments map[*profile.Mapping]plugin.ObjFile
+}
+
+// close releases any external processes being used for the mapping
+// by closing every object file in the table.
+func (mt *mappingTable) close() {
+	for _, segment := range mt.segments {
+		segment.Close()
+	}
+}
diff --git a/src/cmd/pprof/internal/symbolz/symbolz.go b/src/cmd/pprof/internal/symbolz/symbolz.go
new file mode 100644
index 0000000000..6e58001962
--- /dev/null
+++ b/src/cmd/pprof/internal/symbolz/symbolz.go
@@ -0,0 +1,111 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package symbolz symbolizes a profile using the output from the symbolz
+// service.
+package symbolz
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "internal/pprof/profile"
+)
+
+var (
+	// symbolzRE matches one line of symbolz output:
+	// a hex address, whitespace, then the symbol name.
+	symbolzRE = regexp.MustCompile(`(0x[[:xdigit:]]+)\s+(.*)`)
+)
+
+// Symbolize symbolizes profile p by parsing data returned by a
+// symbolz handler. syms receives the symbolz query (hex addresses
+// separated by '+') and returns the symbolz output in a string. It
+// symbolizes all locations based on their addresses, regardless of
+// mapping.
+//
+// NOTE(review): an error returned by syms itself is silently ignored;
+// the profile is simply left unsymbolized in that case.
+func Symbolize(source string, syms func(string, string) ([]byte, error), p *profile.Profile) error {
+	if source = symbolz(source, p); source == "" {
+		// If the source is not a recognizable URL, do nothing.
+		return nil
+	}
+
+	// Construct query of addresses to symbolize.
+	var a []string
+	for _, l := range p.Location {
+		// Only query addresses that have no line information yet.
+		if l.Address != 0 && len(l.Line) == 0 {
+			a = append(a, fmt.Sprintf("%#x", l.Address))
+		}
+	}
+
+	if len(a) == 0 {
+		// No addresses to symbolize.
+		return nil
+	}
+	// lines maps each returned address to its Line; functions dedupes
+	// Function records by symbol name.
+	lines := make(map[uint64]profile.Line)
+	functions := make(map[string]*profile.Function)
+	if b, err := syms(source, strings.Join(a, "+")); err == nil {
+		buf := bytes.NewBuffer(b)
+		for {
+			l, err := buf.ReadString('\n')
+
+			if err != nil {
+				if err == io.EOF {
+					break
+				}
+				return err
+			}
+
+			// Each response line has the form "0xADDR  symbol-name".
+			if symbol := symbolzRE.FindStringSubmatch(l); len(symbol) == 3 {
+				addr, err := strconv.ParseUint(symbol[1], 0, 64)
+				if err != nil {
+					return fmt.Errorf("unexpected parse failure %s: %v", symbol[1], err)
+				}
+
+				name := symbol[2]
+				fn := functions[name]
+				if fn == nil {
+					fn = &profile.Function{
+						// IDs are 1-based, assigned in insertion order.
+						ID:         uint64(len(p.Function) + 1),
+						Name:       name,
+						SystemName: name,
+					}
+					functions[name] = fn
+					p.Function = append(p.Function, fn)
+				}
+
+				// symbolz reports only function names, so the Line
+				// carries no file/line-number information.
+				lines[addr] = profile.Line{Function: fn}
+			}
+		}
+	}
+
+	// Attach the symbolized lines to their locations.
+	for _, l := range p.Location {
+		if line, ok := lines[l.Address]; ok {
+			l.Line = []profile.Line{line}
+			if l.Mapping != nil {
+				l.Mapping.HasFunctions = true
+			}
+		}
+	}
+
+	return nil
+}
+
+// symbolz returns the corresponding symbolz source for a profile URL,
+// or "" when source is not an absolute URL. The handler path is chosen
+// heuristically: if the directory of the profile path ends in "pprof"
+// the sibling endpoint is "/symbol", otherwise "/symbolz".
+//
+// NOTE(review): parameter p is currently unused.
+func symbolz(source string, p *profile.Profile) string {
+	if url, err := url.Parse(source); err == nil && url.Host != "" {
+		if last := strings.LastIndex(url.Path, "/"); last != -1 {
+			if strings.HasSuffix(url.Path[:last], "pprof") {
+				url.Path = url.Path[:last] + "/symbol"
+			} else {
+				url.Path = url.Path[:last] + "/symbolz"
+			}
+			return url.String()
+		}
+	}
+
+	return ""
+}
diff --git a/src/cmd/pprof/internal/tempfile/tempfile.go b/src/cmd/pprof/internal/tempfile/tempfile.go
new file mode 100644
index 0000000000..a5706345e4
--- /dev/null
+++ b/src/cmd/pprof/internal/tempfile/tempfile.go
@@ -0,0 +1,46 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tempfile provides tools to create and delete temporary files
+package tempfile
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "sync"
+)
+
+// New returns an unused filename for output files. It probes dir for
+// names of the form prefix###suffix (index 1..9999) and creates the
+// first one whose Stat fails.
+//
+// NOTE(review): the Stat/Create pair is not atomic (TOCTOU): two
+// concurrent callers can race to the same name, and ANY Stat error
+// (e.g. permission denied), not just non-existence, triggers the
+// Create. os.OpenFile with O_CREATE|O_EXCL would claim the name
+// atomically — TODO confirm before changing.
+func New(dir, prefix, suffix string) (*os.File, error) {
+	for index := 1; index < 10000; index++ {
+		path := filepath.Join(dir, fmt.Sprintf("%s%03d%s", prefix, index, suffix))
+		if _, err := os.Stat(path); err != nil {
+			return os.Create(path)
+		}
+	}
+	// Give up
+	return nil, fmt.Errorf("could not create file of the form %s%03d%s", prefix, 1, suffix)
+}
+
+// tempFiles accumulates paths registered via DeferDelete; Cleanup
+// removes them in LIFO order. tempFilesMu guards tempFiles.
+var tempFiles []string
+var tempFilesMu = sync.Mutex{}
+
+// DeferDelete marks a file or directory to be deleted by next call to Cleanup.
+// It is safe for concurrent use.
+func DeferDelete(path string) {
+	tempFilesMu.Lock()
+	tempFiles = append(tempFiles, path)
+	tempFilesMu.Unlock()
+}
+
+// Cleanup removes any temporary files or directories selected for deferred cleaning.
+// Similar to defer semantics, the nodes are deleted in LIFO order.
+// Removal errors are deliberately ignored (best effort), and os.Remove
+// does not recurse, so a non-empty directory is left in place. The list
+// is reset so a later Cleanup does not retry old entries.
+func Cleanup() {
+	tempFilesMu.Lock()
+	for i := len(tempFiles) - 1; i >= 0; i-- {
+		os.Remove(tempFiles[i])
+	}
+	tempFiles = nil
+	tempFilesMu.Unlock()
+}
diff --git a/src/cmd/pprof/pprof.go b/src/cmd/pprof/pprof.go
index 01f44566ba..18479b45a6 100644
--- a/src/cmd/pprof/pprof.go
+++ b/src/cmd/pprof/pprof.go
@@ -15,12 +15,12 @@ import (
"sync"
"cmd/internal/objfile"
- "cmd/internal/pprof/commands"
- "cmd/internal/pprof/driver"
- "cmd/internal/pprof/fetch"
- "cmd/internal/pprof/plugin"
- "cmd/internal/pprof/symbolizer"
- "cmd/internal/pprof/symbolz"
+ "cmd/pprof/internal/commands"
+ "cmd/pprof/internal/driver"
+ "cmd/pprof/internal/fetch"
+ "cmd/pprof/internal/plugin"
+ "cmd/pprof/internal/symbolizer"
+ "cmd/pprof/internal/symbolz"
"internal/pprof/profile"
)