aboutsummaryrefslogtreecommitdiff
path: root/src/cmd/trace
diff options
context:
space:
mode:
authorHana Kim <hyangah@gmail.com>2018-11-19 12:30:56 -0500
committerHyang-Ah Hana Kim <hyangah@gmail.com>2018-11-22 02:59:55 +0000
commit6d5caf38e37bf9aeba3291f1f0b0081f934b1187 (patch)
treec44550792352d1698edc6ab9dec812758cc566ca /src/cmd/trace
parent47df645473210267fd7512c5b92de00908198974 (diff)
downloadgo-6d5caf38e37bf9aeba3291f1f0b0081f934b1187.tar.gz
go-6d5caf38e37bf9aeba3291f1f0b0081f934b1187.zip
cmd/trace: revert internal/traceparser
The performance improvement is not as big as we hoped. Until the API is feature complete, we postpone the release and avoid added complexity. This change was prepared by reverting all the changes that affected src/cmd/trace and src/internal/traceparser packages after golang.org/cl/137635, and then bringing back MMU computation APIs (originally in src/internal/traceparser) to the src/internal/trace package. Revert "cmd/trace: use new traceparser to parse the raw trace files" This reverts https://golang.org/cl/145457 (commit 08816cb8d7ed16b9c804587ff02c1ad1c3af6cd5). Revert "internal/traceparser: provide parser that uses less space and parses segments of runtime trace files" This reverts https://golang.org/cl/137635 (commit daaf361f74c3665bcb364356c5a9dd9f536c78c3). Change-Id: Ic2a068a7dbaf4053cd9674ca7bde9c58e74385b4 Reviewed-on: https://go-review.googlesource.com/c/150517 Run-TryBot: Hyang-Ah Hana Kim <hyangah@gmail.com> TryBot-Result: Gobot Gobot <gobot@golang.org> Reviewed-by: Austin Clements <austin@google.com>
Diffstat (limited to 'src/cmd/trace')
-rw-r--r--src/cmd/trace/annotations.go7
-rw-r--r--src/cmd/trace/annotations_test.go14
-rw-r--r--src/cmd/trace/goroutines.go11
-rw-r--r--src/cmd/trace/main.go49
-rw-r--r--src/cmd/trace/mmu.go6
-rw-r--r--src/cmd/trace/pprof.go64
-rw-r--r--src/cmd/trace/trace.go168
-rw-r--r--src/cmd/trace/trace_test.go46
-rw-r--r--src/cmd/trace/trace_unix_test.go12
9 files changed, 180 insertions, 197 deletions
diff --git a/src/cmd/trace/annotations.go b/src/cmd/trace/annotations.go
index a4933b51bf..2fb1198cf6 100644
--- a/src/cmd/trace/annotations.go
+++ b/src/cmd/trace/annotations.go
@@ -8,6 +8,7 @@ import (
"bytes"
"fmt"
"html/template"
+ "internal/trace"
"log"
"math"
"net/http"
@@ -16,8 +17,6 @@ import (
"strconv"
"strings"
"time"
-
- trace "internal/traceparser"
)
func init() {
@@ -309,7 +308,7 @@ func analyzeAnnotations() (annotationAnalysisResult, error) {
}
}
// combine region info.
- analyzeGoroutines(res)
+ analyzeGoroutines(events)
for goid, stats := range gs {
// gs is a global var defined in goroutines.go as a result
// of analyzeGoroutines. TODO(hyangah): fix this not to depend
@@ -322,7 +321,7 @@ func analyzeAnnotations() (annotationAnalysisResult, error) {
}
var frame trace.Frame
if s.Start != nil {
- frame = *res.Stacks[s.Start.StkID][0]
+ frame = *s.Start.Stk[0]
}
id := regionTypeID{Frame: frame, Type: s.Name}
regions[id] = append(regions[id], regionDesc{UserRegionDesc: s, G: goid})
diff --git a/src/cmd/trace/annotations_test.go b/src/cmd/trace/annotations_test.go
index 8b9daabcdb..a9068d53c1 100644
--- a/src/cmd/trace/annotations_test.go
+++ b/src/cmd/trace/annotations_test.go
@@ -11,7 +11,7 @@ import (
"context"
"flag"
"fmt"
- "internal/traceparser"
+ traceparser "internal/trace"
"io/ioutil"
"reflect"
"runtime/debug"
@@ -338,8 +338,10 @@ func traceProgram(t *testing.T, f func(), name string) error {
trace.Stop()
saveTrace(buf, name)
- res, err := traceparser.ParseBuffer(buf)
- if err != nil {
+ res, err := traceparser.Parse(buf, name+".faketrace")
+ if err == traceparser.ErrTimeOrder {
+ t.Skipf("skipping due to golang.org/issue/16755: %v", err)
+ } else if err != nil {
return err
}
@@ -368,15 +370,15 @@ func childrenNames(task *taskDesc) (ret []string) {
return ret
}
-func swapLoaderData(res *traceparser.Parsed, err error) {
+func swapLoaderData(res traceparser.ParseResult, err error) {
// swap loader's data.
parseTrace() // fool loader.once.
loader.res = res
loader.err = err
- analyzeGoroutines(res) // fool gsInit once.
- gs = res.GoroutineStats()
+ analyzeGoroutines(nil) // fool gsInit once.
+ gs = traceparser.GoroutineStats(res.Events)
}
diff --git a/src/cmd/trace/goroutines.go b/src/cmd/trace/goroutines.go
index c954704a47..548871a82c 100644
--- a/src/cmd/trace/goroutines.go
+++ b/src/cmd/trace/goroutines.go
@@ -9,6 +9,7 @@ package main
import (
"fmt"
"html/template"
+ "internal/trace"
"log"
"net/http"
"reflect"
@@ -16,8 +17,6 @@ import (
"strconv"
"sync"
"time"
-
- trace "internal/traceparser"
)
func init() {
@@ -39,15 +38,15 @@ var (
)
// analyzeGoroutines generates statistics about execution of all goroutines and stores them in gs.
-func analyzeGoroutines(res *trace.Parsed) {
+func analyzeGoroutines(events []*trace.Event) {
gsInit.Do(func() {
- gs = res.GoroutineStats()
+ gs = trace.GoroutineStats(events)
})
}
// httpGoroutines serves list of goroutine groups.
func httpGoroutines(w http.ResponseWriter, r *http.Request) {
- events, err := parseTrace()
+ events, err := parseEvents()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
@@ -90,7 +89,7 @@ Goroutines: <br>
func httpGoroutine(w http.ResponseWriter, r *http.Request) {
// TODO(hyangah): support format=csv (raw data)
- events, err := parseTrace()
+ events, err := parseEvents()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
diff --git a/src/cmd/trace/main.go b/src/cmd/trace/main.go
index 2f71a3d4bd..f94586abf3 100644
--- a/src/cmd/trace/main.go
+++ b/src/cmd/trace/main.go
@@ -5,12 +5,12 @@
package main
import (
- "bytes"
+ "bufio"
"cmd/internal/browser"
"flag"
"fmt"
"html/template"
- trace "internal/traceparser"
+ "internal/trace"
"io"
"log"
"net"
@@ -115,22 +115,8 @@ func main() {
dief("%v\n", err)
}
- if *debugFlag { // match go tool trace -d (except for Offset and Seq)
- f := func(ev *trace.Event) {
- desc := trace.EventDescriptions[ev.Type]
- w := new(bytes.Buffer)
- fmt.Fprintf(w, "%v %v p=%v g=%v", ev.Ts, desc.Name, ev.P, ev.G)
- for i, a := range desc.Args {
- fmt.Fprintf(w, " %v=%v", a, ev.Args[i])
- }
- for i, a := range desc.SArgs {
- fmt.Fprintf(w, " %v=%v", a, ev.SArgs[i])
- }
- fmt.Println(w.String())
- }
- for i := 0; i < len(res.Events); i++ {
- f(res.Events[i])
- }
+ if *debugFlag {
+ trace.Print(res.Events)
os.Exit(0)
}
reportMemoryUsage("after parsing trace")
@@ -155,23 +141,36 @@ var ranges []Range
var loader struct {
once sync.Once
- res *trace.Parsed
+ res trace.ParseResult
err error
}
-func parseTrace() (*trace.Parsed, error) {
+// parseEvents is a compatibility wrapper that returns only
+// the Events part of trace.ParseResult returned by parseTrace.
+func parseEvents() ([]*trace.Event, error) {
+ res, err := parseTrace()
+ if err != nil {
+ return nil, err
+ }
+ return res.Events, err
+}
+
+func parseTrace() (trace.ParseResult, error) {
loader.once.Do(func() {
- x, err := trace.New(traceFile)
+ tracef, err := os.Open(traceFile)
if err != nil {
- loader.err = err
+ loader.err = fmt.Errorf("failed to open trace file: %v", err)
return
}
- err = x.Parse(0, x.MaxTs, nil)
+ defer tracef.Close()
+
+ // Parse and symbolize.
+ res, err := trace.Parse(bufio.NewReader(tracef), programBinary)
if err != nil {
- loader.err = err
+ loader.err = fmt.Errorf("failed to parse trace: %v", err)
return
}
- loader.res = x
+ loader.res = res
})
return loader.res, loader.err
}
diff --git a/src/cmd/trace/mmu.go b/src/cmd/trace/mmu.go
index 6a7d28e61d..b92fac652c 100644
--- a/src/cmd/trace/mmu.go
+++ b/src/cmd/trace/mmu.go
@@ -28,7 +28,7 @@ package main
import (
"encoding/json"
"fmt"
- trace "internal/traceparser"
+ "internal/trace"
"log"
"math"
"net/http"
@@ -83,11 +83,11 @@ func getMMUCurve(r *http.Request) ([][]trace.MutatorUtil, *trace.MMUCurve, error
mmuCache.lock.Unlock()
c.init.Do(func() {
- tr, err := parseTrace()
+ events, err := parseEvents()
if err != nil {
c.err = err
} else {
- c.util = tr.MutatorUtilization(flags)
+ c.util = trace.MutatorUtilization(events, flags)
c.mmuCurve = trace.NewMMUCurve(c.util)
}
})
diff --git a/src/cmd/trace/pprof.go b/src/cmd/trace/pprof.go
index cf74fe56ae..3389d2799b 100644
--- a/src/cmd/trace/pprof.go
+++ b/src/cmd/trace/pprof.go
@@ -9,6 +9,7 @@ package main
import (
"bufio"
"fmt"
+ "internal/trace"
"io"
"io/ioutil"
"net/http"
@@ -20,8 +21,6 @@ import (
"strconv"
"time"
- trace "internal/traceparser"
-
"github.com/google/pprof/profile"
)
@@ -61,22 +60,22 @@ type interval struct {
begin, end int64 // nanoseconds.
}
-func pprofByGoroutine(compute func(io.Writer, map[uint64][]interval, *trace.Parsed) error) func(w io.Writer, r *http.Request) error {
+func pprofByGoroutine(compute func(io.Writer, map[uint64][]interval, []*trace.Event) error) func(w io.Writer, r *http.Request) error {
return func(w io.Writer, r *http.Request) error {
id := r.FormValue("id")
- res, err := parseTrace()
+ events, err := parseEvents()
if err != nil {
return err
}
- gToIntervals, err := pprofMatchingGoroutines(id, res)
+ gToIntervals, err := pprofMatchingGoroutines(id, events)
if err != nil {
return err
}
- return compute(w, gToIntervals, res)
+ return compute(w, gToIntervals, events)
}
}
-func pprofByRegion(compute func(io.Writer, map[uint64][]interval, *trace.Parsed) error) func(w io.Writer, r *http.Request) error {
+func pprofByRegion(compute func(io.Writer, map[uint64][]interval, []*trace.Event) error) func(w io.Writer, r *http.Request) error {
return func(w io.Writer, r *http.Request) error {
filter, err := newRegionFilter(r)
if err != nil {
@@ -86,7 +85,7 @@ func pprofByRegion(compute func(io.Writer, map[uint64][]interval, *trace.Parsed)
if err != nil {
return err
}
- events, _ := parseTrace()
+ events, _ := parseEvents()
return compute(w, gToIntervals, events)
}
@@ -95,7 +94,7 @@ func pprofByRegion(compute func(io.Writer, map[uint64][]interval, *trace.Parsed)
// pprofMatchingGoroutines parses the goroutine type id string (i.e. pc)
// and returns the ids of goroutines of the matching type and its interval.
// If the id string is empty, returns nil without an error.
-func pprofMatchingGoroutines(id string, p *trace.Parsed) (map[uint64][]interval, error) {
+func pprofMatchingGoroutines(id string, events []*trace.Event) (map[uint64][]interval, error) {
if id == "" {
return nil, nil
}
@@ -103,7 +102,7 @@ func pprofMatchingGoroutines(id string, p *trace.Parsed) (map[uint64][]interval,
if err != nil {
return nil, fmt.Errorf("invalid goroutine type: %v", id)
}
- analyzeGoroutines(p)
+ analyzeGoroutines(events)
var res map[uint64][]interval
for _, g := range gs {
if g.PC != pc {
@@ -172,25 +171,17 @@ func pprofMatchingRegions(filter *regionFilter) (map[uint64][]interval, error) {
return gToIntervals, nil
}
-func stklen(p *trace.Parsed, ev *trace.Event) int {
- if ev.StkID == 0 {
- return 0
- }
- return len(p.Stacks[ev.StkID])
-}
-
// computePprofIO generates IO pprof-like profile (time spent in IO wait, currently only network blocking event).
-func computePprofIO(w io.Writer, gToIntervals map[uint64][]interval, res *trace.Parsed) error {
- events := res.Events
- prof := make(map[uint32]Record)
+func computePprofIO(w io.Writer, gToIntervals map[uint64][]interval, events []*trace.Event) error {
+ prof := make(map[uint64]Record)
for _, ev := range events {
- if ev.Type != trace.EvGoBlockNet || ev.Link == nil || ev.StkID == 0 || stklen(res, ev) == 0 {
+ if ev.Type != trace.EvGoBlockNet || ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 {
continue
}
overlapping := pprofOverlappingDuration(gToIntervals, ev)
if overlapping > 0 {
rec := prof[ev.StkID]
- rec.stk = res.Stacks[ev.StkID]
+ rec.stk = ev.Stk
rec.n++
rec.time += overlapping.Nanoseconds()
prof[ev.StkID] = rec
@@ -200,9 +191,8 @@ func computePprofIO(w io.Writer, gToIntervals map[uint64][]interval, res *trace.
}
// computePprofBlock generates blocking pprof-like profile (time spent blocked on synchronization primitives).
-func computePprofBlock(w io.Writer, gToIntervals map[uint64][]interval, res *trace.Parsed) error {
- events := res.Events
- prof := make(map[uint32]Record)
+func computePprofBlock(w io.Writer, gToIntervals map[uint64][]interval, events []*trace.Event) error {
+ prof := make(map[uint64]Record)
for _, ev := range events {
switch ev.Type {
case trace.EvGoBlockSend, trace.EvGoBlockRecv, trace.EvGoBlockSelect,
@@ -213,13 +203,13 @@ func computePprofBlock(w io.Writer, gToIntervals map[uint64][]interval, res *tra
default:
continue
}
- if ev.Link == nil || ev.StkID == 0 || stklen(res, ev) == 0 {
+ if ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 {
continue
}
overlapping := pprofOverlappingDuration(gToIntervals, ev)
if overlapping > 0 {
rec := prof[ev.StkID]
- rec.stk = res.Stacks[ev.StkID]
+ rec.stk = ev.Stk
rec.n++
rec.time += overlapping.Nanoseconds()
prof[ev.StkID] = rec
@@ -229,17 +219,16 @@ func computePprofBlock(w io.Writer, gToIntervals map[uint64][]interval, res *tra
}
// computePprofSyscall generates syscall pprof-like profile (time spent blocked in syscalls).
-func computePprofSyscall(w io.Writer, gToIntervals map[uint64][]interval, res *trace.Parsed) error {
- events := res.Events
- prof := make(map[uint32]Record)
+func computePprofSyscall(w io.Writer, gToIntervals map[uint64][]interval, events []*trace.Event) error {
+ prof := make(map[uint64]Record)
for _, ev := range events {
- if ev.Type != trace.EvGoSysCall || ev.Link == nil || ev.StkID == 0 || stklen(res, ev) == 0 {
+ if ev.Type != trace.EvGoSysCall || ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 {
continue
}
overlapping := pprofOverlappingDuration(gToIntervals, ev)
if overlapping > 0 {
rec := prof[ev.StkID]
- rec.stk = res.Stacks[ev.StkID]
+ rec.stk = ev.Stk
rec.n++
rec.time += overlapping.Nanoseconds()
prof[ev.StkID] = rec
@@ -250,18 +239,17 @@ func computePprofSyscall(w io.Writer, gToIntervals map[uint64][]interval, res *t
// computePprofSched generates scheduler latency pprof-like profile
// (time between a goroutine become runnable and actually scheduled for execution).
-func computePprofSched(w io.Writer, gToIntervals map[uint64][]interval, res *trace.Parsed) error {
- events := res.Events
- prof := make(map[uint32]Record)
+func computePprofSched(w io.Writer, gToIntervals map[uint64][]interval, events []*trace.Event) error {
+ prof := make(map[uint64]Record)
for _, ev := range events {
if (ev.Type != trace.EvGoUnblock && ev.Type != trace.EvGoCreate) ||
- ev.Link == nil || ev.StkID == 0 || stklen(res, ev) == 0 {
+ ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 {
continue
}
overlapping := pprofOverlappingDuration(gToIntervals, ev)
if overlapping > 0 {
rec := prof[ev.StkID]
- rec.stk = res.Stacks[ev.StkID]
+ rec.stk = ev.Stk
rec.n++
rec.time += overlapping.Nanoseconds()
prof[ev.StkID] = rec
@@ -339,7 +327,7 @@ func serveSVGProfile(prof func(w io.Writer, r *http.Request) error) http.Handler
}
}
-func buildProfile(prof map[uint32]Record) *profile.Profile {
+func buildProfile(prof map[uint64]Record) *profile.Profile {
p := &profile.Profile{
PeriodType: &profile.ValueType{Type: "trace", Unit: "count"},
Period: 1,
diff --git a/src/cmd/trace/trace.go b/src/cmd/trace/trace.go
index d467f371fa..f39a397d0d 100644
--- a/src/cmd/trace/trace.go
+++ b/src/cmd/trace/trace.go
@@ -7,7 +7,7 @@ package main
import (
"encoding/json"
"fmt"
- trace "internal/traceparser"
+ "internal/trace"
"io"
"log"
"math"
@@ -23,7 +23,7 @@ import (
func init() {
http.HandleFunc("/trace", httpTrace)
- http.HandleFunc("/jsontrace", httpJSONTrace)
+ http.HandleFunc("/jsontrace", httpJsonTrace)
http.HandleFunc("/trace_viewer_html", httpTraceViewerHTML)
}
@@ -38,7 +38,7 @@ func httpTrace(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
- html := strings.Replace(templTrace, "{{PARAMS}}", r.Form.Encode(), -1)
+ html := strings.ReplaceAll(templTrace, "{{PARAMS}}", r.Form.Encode())
w.Write([]byte(html))
}
@@ -165,8 +165,8 @@ func httpTraceViewerHTML(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, filepath.Join(runtime.GOROOT(), "misc", "trace", "trace_viewer_full.html"))
}
-// httpJSONTrace serves json trace, requested from within templTrace HTML.
-func httpJSONTrace(w http.ResponseWriter, r *http.Request) {
+// httpJsonTrace serves json trace, requested from within templTrace HTML.
+func httpJsonTrace(w http.ResponseWriter, r *http.Request) {
defer debug.FreeOSMemory()
defer reportMemoryUsage("after httpJsonTrace")
// This is an AJAX handler, so instead of http.Error we use log.Printf to log errors.
@@ -188,7 +188,7 @@ func httpJSONTrace(w http.ResponseWriter, r *http.Request) {
log.Printf("failed to parse goid parameter %q: %v", goids, err)
return
}
- analyzeGoroutines(res)
+ analyzeGoroutines(res.Events)
g, ok := gs[goid]
if !ok {
log.Printf("failed to find goroutine %d", goid)
@@ -202,7 +202,7 @@ func httpJSONTrace(w http.ResponseWriter, r *http.Request) {
params.endTime = lastTimestamp()
}
params.maing = goid
- params.gs = res.RelatedGoroutines(goid)
+ params.gs = trace.RelatedGoroutines(res.Events, goid)
} else if taskids := r.FormValue("taskid"); taskids != "" {
taskid, err := strconv.ParseUint(taskids, 10, 64)
if err != nil {
@@ -264,13 +264,12 @@ func httpJSONTrace(w http.ResponseWriter, r *http.Request) {
}
c := viewerDataTraceConsumer(w, start, end)
- if err := generateTrace(res, params, c); err != nil {
+ if err := generateTrace(params, c); err != nil {
log.Printf("failed to generate trace: %v", err)
return
}
}
-// Range is a named range
type Range struct {
Name string
Start int
@@ -286,13 +285,13 @@ func (r Range) URL() string {
// splitTrace splits the trace into a number of ranges,
// each resulting in approx 100MB of json output
// (trace viewer can hardly handle more).
-func splitTrace(res *trace.Parsed) []Range {
+func splitTrace(res trace.ParseResult) []Range {
params := &traceParams{
parsed: res,
endTime: math.MaxInt64,
}
s, c := splittingTraceConsumer(100 << 20) // 100M
- if err := generateTrace(res, params, c); err != nil {
+ if err := generateTrace(params, c); err != nil {
dief("%v\n", err)
}
return s.Ranges
@@ -309,7 +308,7 @@ func splittingTraceConsumer(max int) (*splitter, traceConsumer) {
}
var (
- data = viewerData{Frames: make(map[string]viewerFrame)}
+ data = ViewerData{Frames: make(map[string]ViewerFrame)}
sizes []eventSz
cw countingWriter
@@ -321,7 +320,7 @@ func splittingTraceConsumer(max int) (*splitter, traceConsumer) {
consumeTimeUnit: func(unit string) {
data.TimeUnit = unit
},
- consumeViewerEvent: func(v *viewerEvent, required bool) {
+ consumeViewerEvent: func(v *ViewerEvent, required bool) {
if required {
// Store required events inside data
// so flush can include them in the required
@@ -334,7 +333,7 @@ func splittingTraceConsumer(max int) (*splitter, traceConsumer) {
sizes = append(sizes, eventSz{v.Time, cw.size + 1}) // +1 for ",".
cw.size = 0
},
- consumeViewerFrame: func(k string, v viewerFrame) {
+ consumeViewerFrame: func(k string, v ViewerFrame) {
data.Frames[k] = v
},
flush: func() {
@@ -395,7 +394,7 @@ func (cw *countingWriter) Write(data []byte) (int, error) {
}
type traceParams struct {
- parsed *trace.Parsed
+ parsed trace.ParseResult
mode traceviewMode
startTime int64
endTime int64
@@ -412,7 +411,6 @@ const (
)
type traceContext struct {
- res *trace.Parsed
*traceParams
consumer traceConsumer
frameTree frameNode
@@ -463,16 +461,16 @@ type gInfo struct {
markAssist *trace.Event // if non-nil, the mark assist currently running.
}
-type viewerData struct {
- Events []*viewerEvent `json:"traceEvents"`
- Frames map[string]viewerFrame `json:"stackFrames"`
+type ViewerData struct {
+ Events []*ViewerEvent `json:"traceEvents"`
+ Frames map[string]ViewerFrame `json:"stackFrames"`
TimeUnit string `json:"displayTimeUnit"`
// This is where mandatory part of the trace starts (e.g. thread names)
footer int
}
-type viewerEvent struct {
+type ViewerEvent struct {
Name string `json:"name,omitempty"`
Phase string `json:"ph"`
Scope string `json:"s,omitempty"`
@@ -488,33 +486,33 @@ type viewerEvent struct {
Category string `json:"cat,omitempty"`
}
-type viewerFrame struct {
+type ViewerFrame struct {
Name string `json:"name"`
Parent int `json:"parent,omitempty"`
}
-type nameArg struct {
+type NameArg struct {
Name string `json:"name"`
}
-type taskArg struct {
+type TaskArg struct {
ID uint64 `json:"id"`
StartG uint64 `json:"start_g,omitempty"`
EndG uint64 `json:"end_g,omitempty"`
}
-type regionArg struct {
+type RegionArg struct {
TaskID uint64 `json:"taskid,omitempty"`
}
-type sortIndexArg struct {
+type SortIndexArg struct {
Index int `json:"sort_index"`
}
type traceConsumer struct {
consumeTimeUnit func(unit string)
- consumeViewerEvent func(v *viewerEvent, required bool)
- consumeViewerFrame func(key string, f viewerFrame)
+ consumeViewerEvent func(v *ViewerEvent, required bool)
+ consumeViewerFrame func(key string, f ViewerFrame)
flush func()
}
@@ -531,15 +529,15 @@ const (
// If mode==goroutineMode, generate trace for goroutine goid, otherwise whole trace.
// startTime, endTime determine part of the trace that we are interested in.
// gset restricts goroutines that are included in the resulting trace.
-func generateTrace(res *trace.Parsed, params *traceParams, consumer traceConsumer) error {
+func generateTrace(params *traceParams, consumer traceConsumer) error {
defer consumer.flush()
- ctx := &traceContext{res: res, traceParams: params}
+ ctx := &traceContext{traceParams: params}
ctx.frameTree.children = make(map[uint64]frameNode)
ctx.consumer = consumer
ctx.consumer.consumeTimeUnit("ns")
- maxProc := int32(0)
+ maxProc := 0
ginfos := make(map[uint64]*gInfo)
stacks := params.parsed.Stacks
@@ -584,12 +582,12 @@ func generateTrace(res *trace.Parsed, params *traceParams, consumer traceConsume
newG := ev.Args[0]
info := getGInfo(newG)
if info.name != "" {
- return fmt.Errorf("duplicate go create event for go id=%d detected at time %d", newG, ev.Ts)
+ return fmt.Errorf("duplicate go create event for go id=%d detected at offset %d", newG, ev.Off)
}
- stk, ok := stacks[uint32(ev.Args[1])]
+ stk, ok := stacks[ev.Args[1]]
if !ok || len(stk) == 0 {
- return fmt.Errorf("invalid go create event: missing stack information for go id=%d at time %d", newG, ev.Ts)
+ return fmt.Errorf("invalid go create event: missing stack information for go id=%d at offset %d", newG, ev.Off)
}
fname := stk[0].Fn
@@ -760,23 +758,23 @@ func generateTrace(res *trace.Parsed, params *traceParams, consumer traceConsume
ctx.emitSectionFooter(procsSection, "PROCS", 2)
}
- ctx.emitFooter(&viewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: trace.GCP, Arg: &nameArg{"GC"}})
- ctx.emitFooter(&viewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: trace.GCP, Arg: &sortIndexArg{-6}})
+ ctx.emitFooter(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: trace.GCP, Arg: &NameArg{"GC"}})
+ ctx.emitFooter(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: trace.GCP, Arg: &SortIndexArg{-6}})
- ctx.emitFooter(&viewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: trace.NetpollP, Arg: &nameArg{"Network"}})
- ctx.emitFooter(&viewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: trace.NetpollP, Arg: &sortIndexArg{-5}})
+ ctx.emitFooter(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: trace.NetpollP, Arg: &NameArg{"Network"}})
+ ctx.emitFooter(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: trace.NetpollP, Arg: &SortIndexArg{-5}})
- ctx.emitFooter(&viewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: trace.TimerP, Arg: &nameArg{"Timers"}})
- ctx.emitFooter(&viewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: trace.TimerP, Arg: &sortIndexArg{-4}})
+ ctx.emitFooter(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: trace.TimerP, Arg: &NameArg{"Timers"}})
+ ctx.emitFooter(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: trace.TimerP, Arg: &SortIndexArg{-4}})
- ctx.emitFooter(&viewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: trace.SyscallP, Arg: &nameArg{"Syscalls"}})
- ctx.emitFooter(&viewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: trace.SyscallP, Arg: &sortIndexArg{-3}})
+ ctx.emitFooter(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: trace.SyscallP, Arg: &NameArg{"Syscalls"}})
+ ctx.emitFooter(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: trace.SyscallP, Arg: &SortIndexArg{-3}})
// Display rows for Ps if we are in the default trace view mode (not goroutine-oriented presentation)
if ctx.mode&modeGoroutineOriented == 0 {
- for i := 0; i <= int(maxProc); i++ {
- ctx.emitFooter(&viewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: uint64(i), Arg: &nameArg{fmt.Sprintf("Proc %v", i)}})
- ctx.emitFooter(&viewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: uint64(i), Arg: &sortIndexArg{i}})
+ for i := 0; i <= maxProc; i++ {
+ ctx.emitFooter(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: uint64(i), Arg: &NameArg{fmt.Sprintf("Proc %v", i)}})
+ ctx.emitFooter(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: uint64(i), Arg: &SortIndexArg{i}})
}
}
@@ -814,27 +812,27 @@ func generateTrace(res *trace.Parsed, params *traceParams, consumer traceConsume
if !ctx.gs[k] {
continue
}
- ctx.emitFooter(&viewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: k, Arg: &nameArg{v.name}})
+ ctx.emitFooter(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: k, Arg: &NameArg{v.name}})
}
// Row for the main goroutine (maing)
- ctx.emitFooter(&viewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: ctx.maing, Arg: &sortIndexArg{-2}})
+ ctx.emitFooter(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: ctx.maing, Arg: &SortIndexArg{-2}})
// Row for GC or global state (specified with G=0)
- ctx.emitFooter(&viewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: 0, Arg: &sortIndexArg{-1}})
+ ctx.emitFooter(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: 0, Arg: &SortIndexArg{-1}})
}
return nil
}
-func (ctx *traceContext) emit(e *viewerEvent) {
+func (ctx *traceContext) emit(e *ViewerEvent) {
ctx.consumer.consumeViewerEvent(e, false)
}
-func (ctx *traceContext) emitFooter(e *viewerEvent) {
+func (ctx *traceContext) emitFooter(e *ViewerEvent) {
ctx.consumer.consumeViewerEvent(e, true)
}
func (ctx *traceContext) emitSectionFooter(sectionID uint64, name string, priority int) {
- ctx.emitFooter(&viewerEvent{Name: "process_name", Phase: "M", Pid: sectionID, Arg: &nameArg{name}})
- ctx.emitFooter(&viewerEvent{Name: "process_sort_index", Phase: "M", Pid: sectionID, Arg: &sortIndexArg{priority}})
+ ctx.emitFooter(&ViewerEvent{Name: "process_name", Phase: "M", Pid: sectionID, Arg: &NameArg{name}})
+ ctx.emitFooter(&ViewerEvent{Name: "process_sort_index", Phase: "M", Pid: sectionID, Arg: &SortIndexArg{priority}})
}
func (ctx *traceContext) time(ev *trace.Event) float64 {
@@ -856,30 +854,31 @@ func tsWithinRange(ts, s, e int64) bool {
func (ctx *traceContext) proc(ev *trace.Event) uint64 {
if ctx.mode&modeGoroutineOriented != 0 && ev.P < trace.FakeP {
return ev.G
+ } else {
+ return uint64(ev.P)
}
- return uint64(ev.P)
}
func (ctx *traceContext) emitSlice(ev *trace.Event, name string) {
ctx.emit(ctx.makeSlice(ev, name))
}
-func (ctx *traceContext) makeSlice(ev *trace.Event, name string) *viewerEvent {
- // If viewerEvent.Dur is not a positive value,
+func (ctx *traceContext) makeSlice(ev *trace.Event, name string) *ViewerEvent {
+ // If ViewerEvent.Dur is not a positive value,
// trace viewer handles it as a non-terminating time interval.
// Avoid it by setting the field with a small value.
durationUsec := ctx.time(ev.Link) - ctx.time(ev)
- if ev.Link == nil || ev.Link.Ts-ev.Ts <= 0 {
+ if ev.Link.Ts-ev.Ts <= 0 {
durationUsec = 0.0001 // 0.1 nanoseconds
}
- sl := &viewerEvent{
+ sl := &ViewerEvent{
Name: name,
Phase: "X",
Time: ctx.time(ev),
Dur: durationUsec,
Tid: ctx.proc(ev),
- Stack: ctx.stack(ctx.res.Stacks[ev.StkID]),
- EndStack: ctx.stack(ctx.res.Stacks[ev.Link.StkID]),
+ Stack: ctx.stack(ev.Stk),
+ EndStack: ctx.stack(ev.Link.Stk),
}
// grey out non-overlapping events if the event is not a global event (ev.G == 0)
@@ -889,7 +888,7 @@ func (ctx *traceContext) makeSlice(ev *trace.Event, name string) *viewerEvent {
type Arg struct {
P int
}
- sl.Arg = &Arg{P: int(ev.P)}
+ sl.Arg = &Arg{P: ev.P}
}
// grey out non-overlapping events.
overlapping := false
@@ -911,10 +910,10 @@ func (ctx *traceContext) emitTask(task *taskDesc, sortIndex int) {
taskName := task.name
durationUsec := float64(task.lastTimestamp()-task.firstTimestamp()) / 1e3
- ctx.emitFooter(&viewerEvent{Name: "thread_name", Phase: "M", Pid: tasksSection, Tid: taskRow, Arg: &nameArg{fmt.Sprintf("T%d %s", task.id, taskName)}})
- ctx.emit(&viewerEvent{Name: "thread_sort_index", Phase: "M", Pid: tasksSection, Tid: taskRow, Arg: &sortIndexArg{sortIndex}})
+ ctx.emitFooter(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: tasksSection, Tid: taskRow, Arg: &NameArg{fmt.Sprintf("T%d %s", task.id, taskName)}})
+ ctx.emit(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: tasksSection, Tid: taskRow, Arg: &SortIndexArg{sortIndex}})
ts := float64(task.firstTimestamp()) / 1e3
- sl := &viewerEvent{
+ sl := &ViewerEvent{
Name: taskName,
Phase: "X",
Time: ts,
@@ -923,13 +922,13 @@ func (ctx *traceContext) emitTask(task *taskDesc, sortIndex int) {
Tid: taskRow,
Cname: pickTaskColor(task.id),
}
- targ := taskArg{ID: task.id}
+ targ := TaskArg{ID: task.id}
if task.create != nil {
- sl.Stack = ctx.stack(ctx.res.Stacks[task.create.StkID])
+ sl.Stack = ctx.stack(task.create.Stk)
targ.StartG = task.create.G
}
if task.end != nil {
- sl.EndStack = ctx.stack(ctx.res.Stacks[task.end.StkID])
+ sl.EndStack = ctx.stack(task.end.Stk)
targ.EndG = task.end.G
}
sl.Arg = targ
@@ -937,8 +936,8 @@ func (ctx *traceContext) emitTask(task *taskDesc, sortIndex int) {
if task.create != nil && task.create.Type == trace.EvUserTaskCreate && task.create.Args[1] != 0 {
ctx.arrowSeq++
- ctx.emit(&viewerEvent{Name: "newTask", Phase: "s", Tid: task.create.Args[1], ID: ctx.arrowSeq, Time: ts, Pid: tasksSection})
- ctx.emit(&viewerEvent{Name: "newTask", Phase: "t", Tid: taskRow, ID: ctx.arrowSeq, Time: ts, Pid: tasksSection})
+ ctx.emit(&ViewerEvent{Name: "newTask", Phase: "s", Tid: task.create.Args[1], ID: ctx.arrowSeq, Time: ts, Pid: tasksSection})
+ ctx.emit(&ViewerEvent{Name: "newTask", Phase: "t", Tid: taskRow, ID: ctx.arrowSeq, Time: ts, Pid: tasksSection})
}
}
@@ -959,7 +958,7 @@ func (ctx *traceContext) emitRegion(s regionDesc) {
scopeID := fmt.Sprintf("%x", id)
name := s.Name
- sl0 := &viewerEvent{
+ sl0 := &ViewerEvent{
Category: "Region",
Name: name,
Phase: "b",
@@ -970,11 +969,11 @@ func (ctx *traceContext) emitRegion(s regionDesc) {
Cname: pickTaskColor(s.TaskID),
}
if s.Start != nil {
- sl0.Stack = ctx.stack(ctx.res.Stacks[s.Start.StkID])
+ sl0.Stack = ctx.stack(s.Start.Stk)
}
ctx.emit(sl0)
- sl1 := &viewerEvent{
+ sl1 := &ViewerEvent{
Category: "Region",
Name: name,
Phase: "e",
@@ -983,10 +982,10 @@ func (ctx *traceContext) emitRegion(s regionDesc) {
ID: uint64(regionID),
Scope: scopeID,
Cname: pickTaskColor(s.TaskID),
- Arg: regionArg{TaskID: s.TaskID},
+ Arg: RegionArg{TaskID: s.TaskID},
}
if s.End != nil {
- sl1.Stack = ctx.stack(ctx.res.Stacks[s.End.StkID])
+ sl1.Stack = ctx.stack(s.End.Stk)
}
ctx.emit(sl1)
}
@@ -1005,7 +1004,7 @@ func (ctx *traceContext) emitHeapCounters(ev *trace.Event) {
diff = ctx.heapStats.nextGC - ctx.heapStats.heapAlloc
}
if tsWithinRange(ev.Ts, ctx.startTime, ctx.endTime) {
- ctx.emit(&viewerEvent{Name: "Heap", Phase: "C", Time: ctx.time(ev), Pid: 1, Arg: &heapCountersArg{ctx.heapStats.heapAlloc, diff}})
+ ctx.emit(&ViewerEvent{Name: "Heap", Phase: "C", Time: ctx.time(ev), Pid: 1, Arg: &heapCountersArg{ctx.heapStats.heapAlloc, diff}})
}
ctx.prevHeapStats = ctx.heapStats
}
@@ -1021,7 +1020,7 @@ func (ctx *traceContext) emitGoroutineCounters(ev *trace.Event) {
return
}
if tsWithinRange(ev.Ts, ctx.startTime, ctx.endTime) {
- ctx.emit(&viewerEvent{Name: "Goroutines", Phase: "C", Time: ctx.time(ev), Pid: 1, Arg: &goroutineCountersArg{uint64(ctx.gstates[gRunning]), uint64(ctx.gstates[gRunnable]), uint64(ctx.gstates[gWaitingGC])}})
+ ctx.emit(&ViewerEvent{Name: "Goroutines", Phase: "C", Time: ctx.time(ev), Pid: 1, Arg: &goroutineCountersArg{uint64(ctx.gstates[gRunning]), uint64(ctx.gstates[gRunnable]), uint64(ctx.gstates[gWaitingGC])}})
}
ctx.prevGstates = ctx.gstates
}
@@ -1036,7 +1035,7 @@ func (ctx *traceContext) emitThreadCounters(ev *trace.Event) {
return
}
if tsWithinRange(ev.Ts, ctx.startTime, ctx.endTime) {
- ctx.emit(&viewerEvent{Name: "Threads", Phase: "C", Time: ctx.time(ev), Pid: 1, Arg: &threadCountersArg{
+ ctx.emit(&ViewerEvent{Name: "Threads", Phase: "C", Time: ctx.time(ev), Pid: 1, Arg: &threadCountersArg{
Running: ctx.threadStats.prunning,
InSyscall: ctx.threadStats.insyscall}})
}
@@ -1074,14 +1073,14 @@ func (ctx *traceContext) emitInstant(ev *trace.Event, name, category string) {
}
arg = &Arg{ev.Args[0]}
}
- ctx.emit(&viewerEvent{
+ ctx.emit(&ViewerEvent{
Name: name,
Category: category,
Phase: "I",
Scope: "t",
Time: ctx.time(ev),
Tid: ctx.proc(ev),
- Stack: ctx.stack(ctx.res.Stacks[ev.StkID]),
+ Stack: ctx.stack(ev.Stk),
Cname: cname,
Arg: arg})
}
@@ -1118,11 +1117,8 @@ func (ctx *traceContext) emitArrow(ev *trace.Event, name string) {
}
ctx.arrowSeq++
- ctx.emit(&viewerEvent{Name: name, Phase: "s", Tid: ctx.proc(ev),
- ID: ctx.arrowSeq, Time: ctx.time(ev),
- Stack: ctx.stack(ctx.res.Stacks[ev.StkID]), Cname: color})
- ctx.emit(&viewerEvent{Name: name, Phase: "t", Tid: ctx.proc(ev.Link),
- ID: ctx.arrowSeq, Time: ctx.time(ev.Link), Cname: color})
+ ctx.emit(&ViewerEvent{Name: name, Phase: "s", Tid: ctx.proc(ev), ID: ctx.arrowSeq, Time: ctx.time(ev), Stack: ctx.stack(ev.Stk), Cname: color})
+ ctx.emit(&ViewerEvent{Name: name, Phase: "t", Tid: ctx.proc(ev.Link), ID: ctx.arrowSeq, Time: ctx.time(ev.Link), Cname: color})
}
func (ctx *traceContext) stack(stk []*trace.Frame) int {
@@ -1144,7 +1140,7 @@ func (ctx *traceContext) buildBranch(parent frameNode, stk []*trace.Frame) int {
node.id = ctx.frameSeq
node.children = make(map[uint64]frameNode)
parent.children[frame.PC] = node
- ctx.consumer.consumeViewerFrame(strconv.Itoa(node.id), viewerFrame{fmt.Sprintf("%v:%v", frame.Fn, frame.Line), parent.id})
+ ctx.consumer.consumeViewerFrame(strconv.Itoa(node.id), ViewerFrame{fmt.Sprintf("%v:%v", frame.Fn, frame.Line), parent.id})
}
return ctx.buildBranch(node, stk)
}
@@ -1179,7 +1175,7 @@ type jsonWriter struct {
}
func viewerDataTraceConsumer(w io.Writer, start, end int64) traceConsumer {
- frames := make(map[string]viewerFrame)
+ frames := make(map[string]ViewerFrame)
enc := json.NewEncoder(w)
written := 0
index := int64(-1)
@@ -1191,7 +1187,7 @@ func viewerDataTraceConsumer(w io.Writer, start, end int64) traceConsumer {
enc.Encode(unit)
io.WriteString(w, ",")
},
- consumeViewerEvent: func(v *viewerEvent, required bool) {
+ consumeViewerEvent: func(v *ViewerEvent, required bool) {
index++
if !required && (index < start || index > end) {
// not in the range. Skip!
@@ -1208,7 +1204,7 @@ func viewerDataTraceConsumer(w io.Writer, start, end int64) traceConsumer {
// Same should be applied to splittingTraceConsumer.
written++
},
- consumeViewerFrame: func(k string, v viewerFrame) {
+ consumeViewerFrame: func(k string, v ViewerFrame) {
frames[k] = v
},
flush: func() {
diff --git a/src/cmd/trace/trace_test.go b/src/cmd/trace/trace_test.go
index abeb330924..9e90f50d4b 100644
--- a/src/cmd/trace/trace_test.go
+++ b/src/cmd/trace/trace_test.go
@@ -8,27 +8,26 @@ package main
import (
"context"
+ "internal/trace"
"io/ioutil"
rtrace "runtime/trace"
"strings"
"testing"
-
- trace "internal/traceparser"
)
// stacks is a fake stack map populated for test.
-type stacks map[uint32][]*trace.Frame
+type stacks map[uint64][]*trace.Frame
// add adds a stack with a single frame whose Fn field is
// set to the provided fname and returns a unique stack id.
func (s *stacks) add(fname string) uint64 {
if *s == nil {
- *s = make(map[uint32][]*trace.Frame)
+ *s = make(map[uint64][]*trace.Frame)
}
- id := uint32(len(*s))
+ id := uint64(len(*s))
(*s)[id] = []*trace.Frame{{Fn: fname}}
- return uint64(id)
+ return id
}
// TestGoroutineCount tests runnable/running goroutine counts computed by generateTrace
@@ -37,7 +36,8 @@ func (s *stacks) add(fname string) uint64 {
// - the counts must not include goroutines blocked waiting on channels or in syscall.
func TestGoroutineCount(t *testing.T) {
w := trace.NewWriter()
- w.Emit(trace.EvBatch, 0, 0) // start of per-P batch event [pid, timestamp]
+ w.Emit(trace.EvBatch, 0, 0) // start of per-P batch event [pid, timestamp]
+ w.Emit(trace.EvFrequency, 1) // [ticks per second]
var s stacks
@@ -61,9 +61,8 @@ func TestGoroutineCount(t *testing.T) {
w.Emit(trace.EvGoCreate, 1, 40, s.add("pkg.f4"), s.add("main.f4"))
w.Emit(trace.EvGoStartLocal, 1, 40) // [timestamp, goroutine id]
w.Emit(trace.EvGoSched, 1, s.add("main.f4")) // [timestamp, stack]
- w.Emit(trace.EvFrequency, 1) // [ticks per second]
- res, err := trace.ParseBuffer(w)
+ res, err := trace.Parse(w, "")
if err != nil {
t.Fatalf("failed to parse test trace: %v", err)
}
@@ -75,9 +74,9 @@ func TestGoroutineCount(t *testing.T) {
}
// Use the default viewerDataTraceConsumer but replace
- // consumeViewerEvent to intercept the viewerEvents for testing.
+ // consumeViewerEvent to intercept the ViewerEvents for testing.
c := viewerDataTraceConsumer(ioutil.Discard, 0, 1<<63-1)
- c.consumeViewerEvent = func(ev *viewerEvent, _ bool) {
+ c.consumeViewerEvent = func(ev *ViewerEvent, _ bool) {
if ev.Name == "Goroutines" {
cnt := ev.Arg.(*goroutineCountersArg)
if cnt.Runnable+cnt.Running > 2 {
@@ -88,7 +87,7 @@ func TestGoroutineCount(t *testing.T) {
}
// If the counts drop below 0, generateTrace will return an error.
- if err := generateTrace(res, params, c); err != nil {
+ if err := generateTrace(params, c); err != nil {
t.Fatalf("generateTrace failed: %v", err)
}
}
@@ -100,7 +99,8 @@ func TestGoroutineFilter(t *testing.T) {
var s stacks
w := trace.NewWriter()
- w.Emit(trace.EvBatch, 0, 0) // start of per-P batch event [pid, timestamp]
+ w.Emit(trace.EvBatch, 0, 0) // start of per-P batch event [pid, timestamp]
+ w.Emit(trace.EvFrequency, 1) // [ticks per second]
// goroutine 10: blocked
w.Emit(trace.EvGoCreate, 1, 10, s.add("pkg.f1"), s.add("main.f1")) // [timestamp, new goroutine id, new stack id, stack id]
@@ -115,9 +115,8 @@ func TestGoroutineFilter(t *testing.T) {
// goroutine 10: runnable->running->block
w.Emit(trace.EvGoStartLocal, 1, 10) // [timestamp, goroutine id]
w.Emit(trace.EvGoBlock, 1, s.add("pkg.f3")) // [timestamp, stack]
- w.Emit(trace.EvFrequency, 1) // [ticks per second]
- res, err := trace.ParseBuffer(w)
+ res, err := trace.Parse(w, "")
if err != nil {
t.Fatalf("failed to parse test trace: %v", err)
}
@@ -130,14 +129,15 @@ func TestGoroutineFilter(t *testing.T) {
}
c := viewerDataTraceConsumer(ioutil.Discard, 0, 1<<63-1)
- if err := generateTrace(res, params, c); err != nil {
+ if err := generateTrace(params, c); err != nil {
t.Fatalf("generateTrace failed: %v", err)
}
}
func TestPreemptedMarkAssist(t *testing.T) {
w := trace.NewWriter()
- w.Emit(trace.EvBatch, 0, 0) // start of per-P batch event [pid, timestamp]
+ w.Emit(trace.EvBatch, 0, 0) // start of per-P batch event [pid, timestamp]
+ w.Emit(trace.EvFrequency, 1) // [ticks per second]
var s stacks
// goroutine 9999: running -> mark assisting -> preempted -> assisting -> running -> block
@@ -148,13 +148,11 @@ func TestPreemptedMarkAssist(t *testing.T) {
w.Emit(trace.EvGoStartLocal, 1, 9999) // [timestamp, goroutine id]
w.Emit(trace.EvGCMarkAssistDone, 1) // [timestamp]
w.Emit(trace.EvGoBlock, 1, s.add("main.f2")) // [timestamp, stack]
- w.Emit(trace.EvFrequency, 1) // [ticks per second]
- res, err := trace.ParseBuffer(w)
+ res, err := trace.Parse(w, "")
if err != nil {
t.Fatalf("failed to parse test trace: %v", err)
}
- t.Logf("%+v", *res)
res.Stacks = s // use fake stacks
params := &traceParams{
@@ -165,12 +163,12 @@ func TestPreemptedMarkAssist(t *testing.T) {
c := viewerDataTraceConsumer(ioutil.Discard, 0, 1<<63-1)
marks := 0
- c.consumeViewerEvent = func(ev *viewerEvent, _ bool) {
+ c.consumeViewerEvent = func(ev *ViewerEvent, _ bool) {
if strings.Contains(ev.Name, "MARK ASSIST") {
marks++
}
}
- if err := generateTrace(res, params, c); err != nil {
+ if err := generateTrace(params, c); err != nil {
t.Fatalf("generateTrace failed: %v", err)
}
@@ -216,7 +214,7 @@ func TestFoo(t *testing.T) {
c := viewerDataTraceConsumer(ioutil.Discard, 0, 1<<63-1)
var logBeforeTaskEnd, logAfterTaskEnd bool
- c.consumeViewerEvent = func(ev *viewerEvent, _ bool) {
+ c.consumeViewerEvent = func(ev *ViewerEvent, _ bool) {
if ev.Name == "log before task ends" {
logBeforeTaskEnd = true
}
@@ -224,7 +222,7 @@ func TestFoo(t *testing.T) {
logAfterTaskEnd = true
}
}
- if err := generateTrace(res, params, c); err != nil {
+ if err := generateTrace(params, c); err != nil {
t.Fatalf("generateTrace failed: %v", err)
}
if !logBeforeTaskEnd {
diff --git a/src/cmd/trace/trace_unix_test.go b/src/cmd/trace/trace_unix_test.go
index 144642ad9e..fec060e121 100644
--- a/src/cmd/trace/trace_unix_test.go
+++ b/src/cmd/trace/trace_unix_test.go
@@ -8,7 +8,7 @@ package main
import (
"bytes"
- "internal/traceparser"
+ traceparser "internal/trace"
"io/ioutil"
"runtime"
"runtime/trace"
@@ -73,15 +73,17 @@ func TestGoroutineInSyscall(t *testing.T) {
}
trace.Stop()
- res, err := traceparser.ParseBuffer(buf)
- if err != nil {
+ res, err := traceparser.Parse(buf, "")
+ if err == traceparser.ErrTimeOrder {
+ t.Skipf("skipping due to golang.org/issue/16755 (timestamps are unreliable): %v", err)
+ } else if err != nil {
t.Fatalf("failed to parse trace: %v", err)
}
// Check only one thread for the pipe read goroutine is
// considered in-syscall.
c := viewerDataTraceConsumer(ioutil.Discard, 0, 1<<63-1)
- c.consumeViewerEvent = func(ev *viewerEvent, _ bool) {
+ c.consumeViewerEvent = func(ev *ViewerEvent, _ bool) {
if ev.Name == "Threads" {
arg := ev.Arg.(*threadCountersArg)
if arg.InSyscall > 1 {
@@ -94,7 +96,7 @@ func TestGoroutineInSyscall(t *testing.T) {
parsed: res,
endTime: int64(1<<63 - 1),
}
- if err := generateTrace(res, param, c); err != nil {
+ if err := generateTrace(param, c); err != nil {
t.Fatalf("failed to generate ViewerData: %v", err)
}
}