author    Russ Cox <rsc@golang.org>  2012-02-22 21:46:12 -0500
committer Russ Cox <rsc@golang.org>  2012-02-22 21:46:12 -0500
commit    ebae73bb24c7d4473055353ece1a82b0370f069f (patch)
tree      770aab09607cfe96477e4fa29eb9914c1063b248
parent    e4b02bfdc09ce9e1307250382f3b985f35ca9723 (diff)
download  go-ebae73bb24c7d4473055353ece1a82b0370f069f.tar.gz
          go-ebae73bb24c7d4473055353ece1a82b0370f069f.zip
pprof: add Profile type
Makes it possible for client code to maintain its own profiles,
and also reduces the API surface by giving us a type that models
built-in profiles.

R=golang-dev, r
CC=golang-dev
https://golang.org/cl/5684056
-rw-r--r--  src/pkg/net/http/pprof/pprof.go           |  81
-rw-r--r--  src/pkg/runtime/pprof/pprof.go            | 504
-rw-r--r--  src/pkg/text/tabwriter/tabwriter_test.go  |   3
3 files changed, 508 insertions(+), 80 deletions(-)
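
A minimal sketch of how client code might use the new Profile type to track a
resource that must be explicitly closed. The package name, profile name, and
connection wrapper below are illustrative, not part of this CL:

package conntrack

import (
	"net"
	"runtime/pprof"
)

// connProfile records the stack that opened each connection not yet closed.
// The name is arbitrary; the convention is an import-path-based prefix.
var connProfile = pprof.NewProfile("example.com/conntrack.conns")

// trackedConn wraps a net.Conn so that Close also removes it from the profile.
type trackedConn struct {
	net.Conn
}

// Dial opens a connection and records the caller's stack in the profile.
func Dial(network, addr string) (net.Conn, error) {
	c, err := net.Dial(network, addr)
	if err != nil {
		return nil, err
	}
	tc := &trackedConn{c}
	// skip=1 starts the trace at the caller of Dial rather than inside Dial.
	connProfile.Add(tc, 1)
	return tc, nil
}

func (c *trackedConn) Close() error {
	connProfile.Remove(c)
	return c.Conn.Close()
}
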
diff --git a/src/pkg/net/http/pprof/pprof.go b/src/pkg/net/http/pprof/pprof.go
index 0fe41b7d31..06fcde1447 100644
--- a/src/pkg/net/http/pprof/pprof.go
+++ b/src/pkg/net/http/pprof/pprof.go
@@ -22,9 +22,9 @@
//
// go tool pprof http://localhost:6060/debug/pprof/profile
//
-// Or to look at the thread creation profile:
+// Or to view all available profiles:
//
-// go tool pprof http://localhost:6060/debug/pprof/thread
+// go tool pprof http://localhost:6060/debug/pprof/
//
// For a study of the facility in action, visit
//
@@ -36,7 +36,9 @@ import (
"bufio"
"bytes"
"fmt"
+ "html/template"
"io"
+ "log"
"net/http"
"os"
"runtime"
@@ -47,11 +49,10 @@ import (
)
func init() {
+ http.Handle("/debug/pprof/", http.HandlerFunc(Index))
http.Handle("/debug/pprof/cmdline", http.HandlerFunc(Cmdline))
http.Handle("/debug/pprof/profile", http.HandlerFunc(Profile))
- http.Handle("/debug/pprof/heap", http.HandlerFunc(Heap))
http.Handle("/debug/pprof/symbol", http.HandlerFunc(Symbol))
- http.Handle("/debug/pprof/thread", http.HandlerFunc(Thread))
}
// Cmdline responds with the running program's
@@ -62,20 +63,6 @@ func Cmdline(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, strings.Join(os.Args, "\x00"))
}
-// Heap responds with the pprof-formatted heap profile.
-// The package initialization registers it as /debug/pprof/heap.
-func Heap(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "text/plain; charset=utf-8")
- pprof.WriteHeapProfile(w)
-}
-
-// Thread responds with the pprof-formatted thread creation profile.
-// The package initialization registers it as /debug/pprof/thread.
-func Thread(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "text/plain; charset=utf-8")
- pprof.WriteThreadProfile(w)
-}
-
// Profile responds with the pprof-formatted cpu profile.
// The package initialization registers it as /debug/pprof/profile.
func Profile(w http.ResponseWriter, r *http.Request) {
@@ -147,3 +134,61 @@ func Symbol(w http.ResponseWriter, r *http.Request) {
w.Write(buf.Bytes())
}
+
+// Handler returns an HTTP handler that serves the named profile.
+func Handler(name string) http.Handler {
+ return handler(name)
+}
+
+type handler string
+
+func (name handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ debug, _ := strconv.Atoi(r.FormValue("debug"))
+ p := pprof.Lookup(string(name))
+ if p == nil {
+ w.WriteHeader(404)
+ fmt.Fprintf(w, "Unknown profile: %s\n", name)
+ return
+ }
+ p.WriteTo(w, debug)
+ return
+}
+
+// Index responds with the pprof-formatted profile named by the request.
+// For example, "/debug/pprof/heap" serves the "heap" profile.
+// Index responds to a request for "/debug/pprof/" with an HTML page
+// listing the available profiles.
+func Index(w http.ResponseWriter, r *http.Request) {
+ if strings.HasPrefix(r.URL.Path, "/debug/pprof/") {
+ name := r.URL.Path[len("/debug/pprof/"):]
+ if name != "" {
+ handler(name).ServeHTTP(w, r)
+ return
+ }
+ }
+
+ profiles := pprof.Profiles()
+ if err := indexTmpl.Execute(w, profiles); err != nil {
+ log.Print(err)
+ }
+}
+
+var indexTmpl = template.Must(template.New("index").Parse(`<html>
+<head>
+<title>/debug/pprof/</title>
+</head>
+/debug/pprof/<br>
+<br>
+<body>
+profiles:<br>
+<table>
+{{range .}}
+<tr><td align=right>{{.Count}}<td><a href="/debug/pprof/{{.Name}}?debug=1">{{.Name}}</a>
+{{end}}
+</table>
+<br>
+<a href="/debug/pprof/goroutine?debug=2">full goroutine stack dump</a><br>
+</body>
+</html>
+`))
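
A sketch of how a program might wire the revised net/http/pprof handlers onto
its own ServeMux instead of the default one; the extra /debug/heap route and
the listen address are assumptions for illustration:

package main

import (
	"log"
	"net/http"
	"net/http/pprof"
)

func main() {
	// A private mux, so the debug endpoints stay off http.DefaultServeMux.
	mux := http.NewServeMux()
	mux.HandleFunc("/debug/pprof/", pprof.Index) // index page plus named profiles
	mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
	mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
	mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)

	// Handler serves a single named profile directly, at any path we choose.
	mux.Handle("/debug/heap", pprof.Handler("heap"))

	log.Fatal(http.ListenAndServe("localhost:6060", mux))
}
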
diff --git a/src/pkg/runtime/pprof/pprof.go b/src/pkg/runtime/pprof/pprof.go
index 42f04f320a..099bb6a92f 100644
--- a/src/pkg/runtime/pprof/pprof.go
+++ b/src/pkg/runtime/pprof/pprof.go
@@ -10,19 +10,354 @@ package pprof
import (
"bufio"
+ "bytes"
"fmt"
"io"
"runtime"
+ "sort"
+ "strings"
"sync"
+ "text/tabwriter"
)
// BUG(rsc): CPU profiling is broken on OS X, due to an Apple kernel bug.
// For details, see http://code.google.com/p/go/source/detail?r=35b716c94225.
-// WriteHeapProfile writes a pprof-formatted heap profile to w.
-// If a write to w returns an error, WriteHeapProfile returns that error.
-// Otherwise, WriteHeapProfile returns nil.
+// A Profile is a collection of stack traces showing the call sequences
+// that led to instances of a particular event, such as allocation.
+// Packages can create and maintain their own profiles; the most common
+// use is for tracking resources that must be explicitly closed, such as files
+// or network connections.
+//
+// A Profile's methods can be called from multiple goroutines simultaneously.
+//
+// Each Profile has a unique name. A few profiles are predefined:
+//
+// goroutine - stack traces of all current goroutines
+// heap - a sampling of all heap allocations
+// threadcreate - stack traces that led to the creation of new OS threads
+//
+// These predefined profiles maintain themselves and panic on an explicit
+// Add or Remove method call.
+//
+// The CPU profile is not available as a Profile. It has a special API,
+// the StartCPUProfile and StopCPUProfile functions, because it streams
+// output to a writer during profiling.
+//
+type Profile struct {
+ name string
+ mu sync.Mutex
+ m map[interface{}][]uintptr
+ count func() int
+ write func(io.Writer, int) error
+}
+
+// profiles records all registered profiles.
+var profiles struct {
+ mu sync.Mutex
+ m map[string]*Profile
+}
+
+var goroutineProfile = &Profile{
+ name: "goroutine",
+ count: countGoroutine,
+ write: writeGoroutine,
+}
+
+var threadcreateProfile = &Profile{
+ name: "threadcreate",
+ count: countThreadCreate,
+ write: writeThreadCreate,
+}
+
+var heapProfile = &Profile{
+ name: "heap",
+ count: countHeap,
+ write: writeHeap,
+}
+
+func lockProfiles() {
+ profiles.mu.Lock()
+ if profiles.m == nil {
+ // Initial built-in profiles.
+ profiles.m = map[string]*Profile{
+ "goroutine": goroutineProfile,
+ "threadcreate": threadcreateProfile,
+ "heap": heapProfile,
+ }
+ }
+}
+
+func unlockProfiles() {
+ profiles.mu.Unlock()
+}
+
+// NewProfile creates a new profile with the given name.
+// If a profile with that name already exists, NewProfile panics.
+// The convention is to use an 'import/path.' prefix to create
+// separate name spaces for each package.
+func NewProfile(name string) *Profile {
+ lockProfiles()
+ defer unlockProfiles()
+ if name == "" {
+ panic("pprof: NewProfile with empty name")
+ }
+ if profiles.m[name] != nil {
+ panic("pprof: NewProfile name already in use: " + name)
+ }
+ p := &Profile{
+ name: name,
+ m: map[interface{}][]uintptr{},
+ }
+ profiles.m[name] = p
+ return p
+}
+
+// Lookup returns the profile with the given name, or nil if no such profile exists.
+func Lookup(name string) *Profile {
+ lockProfiles()
+ defer unlockProfiles()
+ return profiles.m[name]
+}
+
+// Profiles returns a slice of all the known profiles, sorted by name.
+func Profiles() []*Profile {
+ lockProfiles()
+ defer unlockProfiles()
+
+ var all []*Profile
+ for _, p := range profiles.m {
+ all = append(all, p)
+ }
+
+ sort.Sort(byName(all))
+ return all
+}
+
+type byName []*Profile
+
+func (x byName) Len() int { return len(x) }
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x byName) Less(i, j int) bool { return x[i].name < x[j].name }
+
+// Name returns this profile's name, which can be passed to Lookup to reobtain the profile.
+func (p *Profile) Name() string {
+ return p.name
+}
+
+// Count returns the number of execution stacks currently in the profile.
+func (p *Profile) Count() int {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.count != nil {
+ return p.count()
+ }
+ return len(p.m)
+}
+
+// Add adds the current execution stack to the profile, associated with value.
+// Add stores value in an internal map, so value must be suitable for use as
+// a map key and will not be garbage collected until the corresponding
+// call to Remove. Add panics if the profile already contains a stack for value.
+//
+// The skip parameter has the same meaning as runtime.Caller's skip
+// and controls where the stack trace begins. Passing skip=0 begins the
+// trace in the function calling Add. For example, given this
+// execution stack:
+//
+// Add
+// called from rpc.NewClient
+// called from mypkg.Run
+// called from main.main
+//
+// Passing skip=0 begins the stack trace at the call to Add inside rpc.NewClient.
+// Passing skip=1 begins the stack trace at the call to NewClient inside mypkg.Run.
+//
+func (p *Profile) Add(value interface{}, skip int) {
+ if p.name == "" {
+ panic("pprof: use of uninitialized Profile")
+ }
+ if p.write != nil {
+ panic("pprof: Add called on built-in Profile " + p.name)
+ }
+
+ stk := make([]uintptr, 32)
+ n := runtime.Callers(skip+1, stk[:])
+
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.m[value] != nil {
+ panic("pprof: Profile.Add of duplicate value")
+ }
+ p.m[value] = stk[:n]
+}
+
+// Remove removes the execution stack associated with value from the profile.
+// It is a no-op if the value is not in the profile.
+func (p *Profile) Remove(value interface{}) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ delete(p.m, value)
+}
+
+// WriteTo writes a pprof-formatted snapshot of the profile to w.
+// If a write to w returns an error, WriteTo returns that error.
+// Otherwise, WriteTo returns nil.
+//
+// The debug parameter enables additional output.
+// Passing debug=0 prints only the hexadecimal addresses that pprof needs.
+// Passing debug=1 adds comments translating addresses to function names
+// and line numbers, so that a programmer can read the profile without tools.
+//
+// The predefined profiles may assign meaning to other debug values;
+// for example, when printing the "goroutine" profile, debug=2 means to
+// print the goroutine stacks in the same form that a Go program uses
+// when dying due to an unrecovered panic.
+func (p *Profile) WriteTo(w io.Writer, debug int) error {
+ if p.name == "" {
+ panic("pprof: use of zero Profile")
+ }
+ if p.write != nil {
+ return p.write(w, debug)
+ }
+
+ // Obtain consistent snapshot under lock; then process without lock.
+ var all [][]uintptr
+ p.mu.Lock()
+ for _, stk := range p.m {
+ all = append(all, stk)
+ }
+ p.mu.Unlock()
+
+ // Map order is non-deterministic; make output deterministic.
+ sort.Sort(stackProfile(all))
+
+ return printCountProfile(w, debug, p.name, stackProfile(all))
+}
+
+type stackProfile [][]uintptr
+
+func (x stackProfile) Len() int { return len(x) }
+func (x stackProfile) Stack(i int) []uintptr { return x[i] }
+func (x stackProfile) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x stackProfile) Less(i, j int) bool {
+ t, u := x[i], x[j]
+ for k := 0; k < len(t) && k < len(u); k++ {
+ if t[k] != u[k] {
+ return t[k] < u[k]
+ }
+ }
+ return len(t) < len(u)
+}
+
+// A countProfile is a set of stack traces to be printed as counts
+// grouped by stack trace. There are multiple implementations:
+// all that matters is that we can find out how many traces there are
+// and obtain each trace in turn.
+type countProfile interface {
+ Len() int
+ Stack(i int) []uintptr
+}
+
+// printCountProfile prints a countProfile at the specified debug level.
+func printCountProfile(w io.Writer, debug int, name string, p countProfile) error {
+ b := bufio.NewWriter(w)
+ var tw *tabwriter.Writer
+ w = b
+ if debug > 0 {
+ tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
+ w = tw
+ }
+
+ fmt.Fprintf(w, "%s profile: total %d\n", name, p.Len())
+
+ // Build count of each stack.
+ var buf bytes.Buffer
+ key := func(stk []uintptr) string {
+ buf.Reset()
+ fmt.Fprintf(&buf, "@")
+ for _, pc := range stk {
+ fmt.Fprintf(&buf, " %#x", pc)
+ }
+ return buf.String()
+ }
+ m := map[string]int{}
+ n := p.Len()
+ for i := 0; i < n; i++ {
+ m[key(p.Stack(i))]++
+ }
+
+ // Print stacks, listing count on first occurrence of a unique stack.
+ for i := 0; i < n; i++ {
+ stk := p.Stack(i)
+ s := key(stk)
+ if count := m[s]; count != 0 {
+ fmt.Fprintf(w, "%d %s\n", count, s)
+ if debug > 0 {
+ printStackRecord(w, stk, false)
+ }
+ delete(m, s)
+ }
+ }
+
+ if tw != nil {
+ tw.Flush()
+ }
+ return b.Flush()
+}
+
+// printStackRecord prints the function + source line information
+// for a single stack trace.
+func printStackRecord(w io.Writer, stk []uintptr, allFrames bool) {
+ show := allFrames
+ for _, pc := range stk {
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ show = true
+ fmt.Fprintf(w, "#\t%#x\n", pc)
+ } else {
+ file, line := f.FileLine(pc)
+ name := f.Name()
+ // Hide runtime.goexit and any runtime functions at the beginning.
+ // This is useful mainly for allocation traces.
+ if name == "runtime.goexit" || !show && strings.HasPrefix(name, "runtime.") {
+ continue
+ }
+ show = true
+ fmt.Fprintf(w, "#\t%#x\t%s+%#x\t%s:%d\n", pc, f.Name(), pc-f.Entry(), file, line)
+ }
+ }
+ if !show {
+ // We didn't print anything; do it again,
+ // and this time include runtime functions.
+ printStackRecord(w, stk, true)
+ return
+ }
+ fmt.Fprintf(w, "\n")
+}
+
+// Interface to system profiles.
+
+type byInUseBytes []runtime.MemProfileRecord
+
+func (x byInUseBytes) Len() int { return len(x) }
+func (x byInUseBytes) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x byInUseBytes) Less(i, j int) bool { return x[i].InUseBytes() > x[j].InUseBytes() }
+
+// WriteHeapProfile is shorthand for Lookup("heap").WriteTo(w, 0).
+// It is preserved for backwards compatibility.
func WriteHeapProfile(w io.Writer) error {
+ return writeHeap(w, 0)
+}
+
+// countHeap returns the number of records in the heap profile.
+func countHeap() int {
+ n, _ := runtime.MemProfile(nil, false)
+ return n
+}
+
+// writeHeapProfile writes the current runtime heap profile to w.
+func writeHeap(w io.Writer, debug int) error {
// Find out how many records there are (MemProfile(nil, false)),
// allocate that many records, and get the data.
// There's a race—more records might be added between
@@ -44,6 +379,16 @@ func WriteHeapProfile(w io.Writer) error {
// Profile grew; try again.
}
+ sort.Sort(byInUseBytes(p))
+
+ b := bufio.NewWriter(w)
+ var tw *tabwriter.Writer
+ w = b
+ if debug > 0 {
+ tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
+ w = tw
+ }
+
var total runtime.MemProfileRecord
for i := range p {
r := &p[i]
@@ -56,78 +401,120 @@ func WriteHeapProfile(w io.Writer) error {
// Technically the rate is MemProfileRate not 2*MemProfileRate,
// but early versions of the C++ heap profiler reported 2*MemProfileRate,
// so that's what pprof has come to expect.
- b := bufio.NewWriter(w)
- fmt.Fprintf(b, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
+ fmt.Fprintf(w, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
total.InUseObjects(), total.InUseBytes(),
total.AllocObjects, total.AllocBytes,
2*runtime.MemProfileRate)
for i := range p {
r := &p[i]
- fmt.Fprintf(b, "%d: %d [%d: %d] @",
+ fmt.Fprintf(w, "%d: %d [%d: %d] @",
r.InUseObjects(), r.InUseBytes(),
r.AllocObjects, r.AllocBytes)
for _, pc := range r.Stack() {
- fmt.Fprintf(b, " %#x", pc)
+ fmt.Fprintf(w, " %#x", pc)
+ }
+ fmt.Fprintf(w, "\n")
+ if debug > 0 {
+ printStackRecord(w, r.Stack(), false)
}
- fmt.Fprintf(b, "\n")
}
// Print memstats information too.
- // Pprof will ignore, but useful for people.
- s := new(runtime.MemStats)
- runtime.ReadMemStats(s)
- fmt.Fprintf(b, "\n# runtime.MemStats\n")
- fmt.Fprintf(b, "# Alloc = %d\n", s.Alloc)
- fmt.Fprintf(b, "# TotalAlloc = %d\n", s.TotalAlloc)
- fmt.Fprintf(b, "# Sys = %d\n", s.Sys)
- fmt.Fprintf(b, "# Lookups = %d\n", s.Lookups)
- fmt.Fprintf(b, "# Mallocs = %d\n", s.Mallocs)
-
- fmt.Fprintf(b, "# HeapAlloc = %d\n", s.HeapAlloc)
- fmt.Fprintf(b, "# HeapSys = %d\n", s.HeapSys)
- fmt.Fprintf(b, "# HeapIdle = %d\n", s.HeapIdle)
- fmt.Fprintf(b, "# HeapInuse = %d\n", s.HeapInuse)
-
- fmt.Fprintf(b, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
- fmt.Fprintf(b, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
- fmt.Fprintf(b, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
- fmt.Fprintf(b, "# BuckHashSys = %d\n", s.BuckHashSys)
-
- fmt.Fprintf(b, "# NextGC = %d\n", s.NextGC)
- fmt.Fprintf(b, "# PauseNs = %d\n", s.PauseNs)
- fmt.Fprintf(b, "# NumGC = %d\n", s.NumGC)
- fmt.Fprintf(b, "# EnableGC = %v\n", s.EnableGC)
- fmt.Fprintf(b, "# DebugGC = %v\n", s.DebugGC)
-
- fmt.Fprintf(b, "# BySize = Size * (Active = Mallocs - Frees)\n")
- fmt.Fprintf(b, "# (Excluding large blocks.)\n")
- for _, t := range s.BySize {
- if t.Mallocs > 0 {
- fmt.Fprintf(b, "# %d * (%d = %d - %d)\n", t.Size, t.Mallocs-t.Frees, t.Mallocs, t.Frees)
- }
+ // Pprof will ignore, but useful for people
+ if debug > 0 {
+ s := new(runtime.MemStats)
+ runtime.ReadMemStats(s)
+ fmt.Fprintf(w, "\n# runtime.MemStats\n")
+ fmt.Fprintf(w, "# Alloc = %d\n", s.Alloc)
+ fmt.Fprintf(w, "# TotalAlloc = %d\n", s.TotalAlloc)
+ fmt.Fprintf(w, "# Sys = %d\n", s.Sys)
+ fmt.Fprintf(w, "# Lookups = %d\n", s.Lookups)
+ fmt.Fprintf(w, "# Mallocs = %d\n", s.Mallocs)
+
+ fmt.Fprintf(w, "# HeapAlloc = %d\n", s.HeapAlloc)
+ fmt.Fprintf(w, "# HeapSys = %d\n", s.HeapSys)
+ fmt.Fprintf(w, "# HeapIdle = %d\n", s.HeapIdle)
+ fmt.Fprintf(w, "# HeapInuse = %d\n", s.HeapInuse)
+
+ fmt.Fprintf(w, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
+ fmt.Fprintf(w, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
+ fmt.Fprintf(w, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
+ fmt.Fprintf(w, "# BuckHashSys = %d\n", s.BuckHashSys)
+
+ fmt.Fprintf(w, "# NextGC = %d\n", s.NextGC)
+ fmt.Fprintf(w, "# PauseNs = %d\n", s.PauseNs)
+ fmt.Fprintf(w, "# NumGC = %d\n", s.NumGC)
+ fmt.Fprintf(w, "# EnableGC = %v\n", s.EnableGC)
+ fmt.Fprintf(w, "# DebugGC = %v\n", s.DebugGC)
+ }
+
+ if tw != nil {
+ tw.Flush()
}
return b.Flush()
}
-// WriteThreadProfile writes a pprof-formatted thread creation profile to w.
-// If a write to w returns an error, WriteThreadProfile returns that error.
-// Otherwise, WriteThreadProfile returns nil.
-func WriteThreadProfile(w io.Writer) error {
- // Find out how many records there are (ThreadProfile(nil)),
+// countThreadCreate returns the size of the current ThreadCreateProfile.
+func countThreadCreate() int {
+ n, _ := runtime.ThreadCreateProfile(nil)
+ return n
+}
+
+// writeThreadCreate writes the current runtime ThreadCreateProfile to w.
+func writeThreadCreate(w io.Writer, debug int) error {
+ return writeRuntimeProfile(w, debug, "threadcreate", runtime.ThreadCreateProfile)
+}
+
+// countGoroutine returns the number of goroutines.
+func countGoroutine() int {
+ return runtime.NumGoroutine()
+}
+
+// writeGoroutine writes the current runtime GoroutineProfile to w.
+func writeGoroutine(w io.Writer, debug int) error {
+ if debug >= 2 {
+ return writeGoroutineStacks(w)
+ }
+ return writeRuntimeProfile(w, debug, "goroutine", runtime.GoroutineProfile)
+}
+
+func writeGoroutineStacks(w io.Writer) error {
+ // We don't know how big the buffer needs to be to collect
+ // all the goroutines. Start with 1 MB and try a few times, doubling each time.
+ // Give up and use a truncated trace if 64 MB is not enough.
+ buf := make([]byte, 1<<20)
+ for i := 0; ; i++ {
+ n := runtime.Stack(buf, true)
+ if n < len(buf) {
+ buf = buf[:n]
+ break
+ }
+ if len(buf) >= 64<<20 {
+ // Filled 64 MB - stop there.
+ break
+ }
+ buf = make([]byte, 2*len(buf))
+ }
+ _, err := w.Write(buf)
+ return err
+}
+
+func writeRuntimeProfile(w io.Writer, debug int, name string, fetch func([]runtime.StackRecord) (int, bool)) error {
+ // Find out how many records there are (fetch(nil)),
// allocate that many records, and get the data.
- // There's a race—more records (threads) might be added between
+ // There's a race—more records might be added between
// the two calls—so allocate a few extra records for safety
// and also try again if we're very unlucky.
// The loop should only execute one iteration in the common case.
- var p []runtime.ThreadProfileRecord
- n, ok := runtime.ThreadProfile(nil)
+ var p []runtime.StackRecord
+ n, ok := fetch(nil)
for {
// Allocate room for a slightly bigger profile,
// in case a few more entries have been added
// since the call to ThreadProfile.
- p = make([]runtime.ThreadProfileRecord, n+10)
- n, ok = runtime.ThreadProfile(p)
+ p = make([]runtime.StackRecord, n+10)
+ n, ok = fetch(p)
if ok {
p = p[0:n]
break
@@ -135,19 +522,14 @@ func WriteThreadProfile(w io.Writer) error {
// Profile grew; try again.
}
- b := bufio.NewWriter(w)
- fmt.Fprintf(b, "thread creation profile: %d threads\n", n)
- for i := range p {
- r := &p[i]
- fmt.Fprintf(b, "@")
- for _, pc := range r.Stack() {
- fmt.Fprintf(b, " %#x", pc)
- }
- fmt.Fprintf(b, "\n")
- }
- return b.Flush()
+ return printCountProfile(w, debug, name, runtimeProfile(p))
}
+type runtimeProfile []runtime.StackRecord
+
+func (p runtimeProfile) Len() int { return len(p) }
+func (p runtimeProfile) Stack(i int) []uintptr { return p[i].Stack() }
+
var cpu struct {
sync.Mutex
profiling bool
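
With WriteThreadProfile gone and WriteHeapProfile kept only as shorthand for
Lookup("heap").WriteTo(w, 0), the built-in profiles are reached through Lookup
and WriteTo. A sketch of dumping them from ordinary code; the writers and
debug levels chosen here are illustrative:

package main

import (
	"os"
	"runtime/pprof"
)

func main() {
	// debug=1 adds comments mapping addresses to function names and line
	// numbers, so the output is readable without the pprof tool.
	pprof.Lookup("heap").WriteTo(os.Stdout, 1)
	pprof.Lookup("threadcreate").WriteTo(os.Stdout, 1)

	// For the goroutine profile, debug=2 prints full stacks in the same
	// form a program uses when dying from an unrecovered panic.
	pprof.Lookup("goroutine").WriteTo(os.Stderr, 2)
}
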
diff --git a/src/pkg/text/tabwriter/tabwriter_test.go b/src/pkg/text/tabwriter/tabwriter_test.go
index 1ffb330d43..ace5356473 100644
--- a/src/pkg/text/tabwriter/tabwriter_test.go
+++ b/src/pkg/text/tabwriter/tabwriter_test.go
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package tabwriter
+package tabwriter_test
import (
"io"
"testing"
+ . "text/tabwriter"
)
type buffer struct {
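
The tabwriter test now lives in an external test package, exercising only the
exported API via a dot import. A sketch of the same pattern; the test body
here is illustrative, not taken from the CL:

// External ("black box") test package: it can only use tabwriter's exported API.
package tabwriter_test

import (
	"bytes"
	"testing"

	. "text/tabwriter" // dot import keeps existing identifiers unqualified
)

func TestExportedAPI(t *testing.T) {
	var buf bytes.Buffer
	w := NewWriter(&buf, 1, 8, 1, '\t', 0)
	if _, err := w.Write([]byte("a\tb\n")); err != nil {
		t.Fatal(err)
	}
	if err := w.Flush(); err != nil {
		t.Fatal(err)
	}
	if buf.Len() == 0 {
		t.Fatal("no output from tabwriter")
	}
}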