// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build goexperiment.exectracer2

// Runtime -> tracer API.

package runtime

import (
	"runtime/internal/atomic"
	_ "unsafe" // for go:linkname
)

// gTraceState is per-G state for the tracer.
type gTraceState struct {
	traceSchedResourceState
}

// reset resets the gTraceState for a new goroutine.
func (s *gTraceState) reset() {
	s.seq = [2]uint64{}
	// N.B. s.statusTraced is managed and cleared separately.
}

// mTraceState is per-M state for the tracer.
type mTraceState struct {
	seqlock atomic.Uintptr // seqlock indicating that this M is writing to a trace buffer.
	buf     [2]*traceBuf   // Per-M traceBuf for writing. Indexed by trace.gen%2.
	link    *m             // Snapshot of alllink or freelink.
}

// pTraceState is per-P state for the tracer.
type pTraceState struct {
	traceSchedResourceState

	// mSyscallID is the ID of the M this was bound to before entering a syscall.
	mSyscallID int64

	// maySweep indicates that sweep events should be traced.
	// This is used to defer the sweep start event until a span
	// has actually been swept.
	maySweep bool

	// inSweep indicates that at least one sweep event has been traced.
	inSweep bool

	// swept and reclaimed track the number of bytes swept and reclaimed
	// by sweeping in the current sweep loop (while maySweep was true).
	swept, reclaimed uintptr
}

// traceLockInit initializes global trace locks.
func traceLockInit() {
	// Sharing a lock rank here is fine because they should never be accessed
	// together. If they are, we want to find out immediately.
	lockInit(&trace.stringTab[0].lock, lockRankTraceStrings)
	lockInit(&trace.stringTab[0].tab.lock, lockRankTraceStrings)
	lockInit(&trace.stringTab[1].lock, lockRankTraceStrings)
	lockInit(&trace.stringTab[1].tab.lock, lockRankTraceStrings)
	lockInit(&trace.stackTab[0].tab.lock, lockRankTraceStackTab)
	lockInit(&trace.stackTab[1].tab.lock, lockRankTraceStackTab)
	lockInit(&trace.lock, lockRankTrace)
}

// lockRankMayTraceFlush records the lock ranking effects of a
// potential call to traceFlush.
//
// nosplit because traceAcquire is nosplit.
//
//go:nosplit
func lockRankMayTraceFlush() {
	lockWithRankMayAcquire(&trace.lock, getLockRank(&trace.lock))
}

// traceBlockReason is an enumeration of reasons a goroutine might block.
// This is the interface the rest of the runtime uses to tell the
// tracer why a goroutine blocked. The tracer then propagates this information
// into the trace however it sees fit.
//
// Note that traceBlockReasons should not be compared, since reasons that are
// distinct by name may *not* be distinct by value.
type traceBlockReason uint8

const (
	traceBlockGeneric traceBlockReason = iota
	traceBlockForever
	traceBlockNet
	traceBlockSelect
	traceBlockCondWait
	traceBlockSync
	traceBlockChanSend
	traceBlockChanRecv
	traceBlockGCMarkAssist
	traceBlockGCSweep
	traceBlockSystemGoroutine
	traceBlockPreempted
	traceBlockDebugCall
	traceBlockUntilGCEnds
	traceBlockSleep
)

var traceBlockReasonStrings = [...]string{
	traceBlockGeneric:         "unspecified",
	traceBlockForever:         "forever",
	traceBlockNet:             "network",
	traceBlockSelect:          "select",
	traceBlockCondWait:        "sync.(*Cond).Wait",
	traceBlockSync:            "sync",
	traceBlockChanSend:        "chan send",
	traceBlockChanRecv:        "chan receive",
	traceBlockGCMarkAssist:    "GC mark assist wait for work",
	traceBlockGCSweep:         "GC background sweeper wait",
	traceBlockSystemGoroutine: "system goroutine wait",
	traceBlockPreempted:       "preempted",
	traceBlockDebugCall:       "wait for debug call",
	traceBlockUntilGCEnds:     "wait until GC ends",
	traceBlockSleep:           "sleep",
}

// traceGoStopReason is an enumeration of reasons a goroutine might yield.
//
// Note that traceGoStopReasons should not be compared, since reasons that are
// distinct by name may *not* be distinct by value.
type traceGoStopReason uint8

const (
	traceGoStopGeneric traceGoStopReason = iota
	traceGoStopGoSched
	traceGoStopPreempted
)

var traceGoStopReasonStrings = [...]string{
	traceGoStopGeneric:   "unspecified",
	traceGoStopGoSched:   "runtime.Gosched",
	traceGoStopPreempted: "preempted",
}

// traceEnabled returns true if the trace is currently enabled.
//
//go:nosplit
func traceEnabled() bool {
	return trace.gen.Load() != 0
}

// traceShuttingDown returns true if the trace is currently shutting down.
func traceShuttingDown() bool {
	return trace.shutdown.Load()
}

// traceLocker represents an M writing trace events. While a traceLocker value
// is valid, the tracer observes all operations on the G/M/P or trace events being
// written as happening atomically.
type traceLocker struct {
	mp  *m
	gen uintptr
}

// debugTraceReentrancy enables reentrancy checks in the tracer.
//
// This is optional because throwing in a function makes it instantly
// not inlineable, and we want traceAcquire to be inlineable for
// low overhead when the trace is disabled.
const debugTraceReentrancy = false

// traceAcquire prepares this M for writing one or more trace events.
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func traceAcquire() traceLocker {
	if !traceEnabled() {
		return traceLocker{}
	}
	return traceAcquireEnabled()
}

// traceAcquireEnabled is the traceEnabled path for traceAcquire. It's explicitly
// broken out so that traceAcquire stays inlineable, which keeps the tracer's
// overhead low when tracing is disabled.
//
// nosplit because it's called by traceAcquire, which is nosplit.
//
//go:nosplit
func traceAcquireEnabled() traceLocker {
	// Any time we acquire a traceLocker, we may flush a trace buffer. But
	// buffer flushes are rare. Record the lock edge even if it doesn't happen
	// this time.
	lockRankMayTraceFlush()

	// Prevent preemption.
	mp := acquirem()

	// Acquire the trace seqlock. This prevents traceAdvance from moving forward
	// until all Ms are observed to be outside of their seqlock critical section.
	//
	// Note: The seqlock is mutated here and also in traceCPUSample. If you update
	// usage of the seqlock here, make sure to also look at what traceCPUSample is
	// doing.
	seq := mp.trace.seqlock.Add(1)
	if debugTraceReentrancy && seq%2 != 1 {
		throw("bad use of trace.seqlock or tracer is reentrant")
	}

	// N.B. This load of gen appears redundant with the one in traceEnabled.
	// However, it's very important that the gen we use for writing to the trace
	// is acquired under a traceLocker so traceAdvance can make sure no stale
	// gen values are being used.
	//
	// Because we're doing this load again, it also means that the trace
	// might end up being disabled when we load it. In that case we need to undo
	// what we did and bail.
	gen := trace.gen.Load()
	if gen == 0 {
		mp.trace.seqlock.Add(1)
		releasem(mp)
		return traceLocker{}
	}
	return traceLocker{mp, gen}
}
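
// On the other side of this protocol, traceAdvance treats an odd seqlock value
// as "this M may be mid-write" and waits for the value to become even before it
// considers the M to be outside its critical section. A minimal illustrative
// sketch of such a wait (not the actual traceAdvance code):
//
//	for mp.trace.seqlock.Load()%2 != 0 {
//		osyield()
//	}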

// ok returns true if the traceLocker is valid (i.e. tracing is enabled).
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func (tl traceLocker) ok() bool {
	return tl.gen != 0
}

// traceRelease indicates that this M is done writing trace events.
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func traceRelease(tl traceLocker) {
	seq := tl.mp.trace.seqlock.Add(1)
	if debugTraceReentrancy && seq%2 != 0 {
		print("runtime: seq=", seq, "\n")
		throw("bad use of trace.seqlock")
	}
	releasem(tl.mp)
}
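
// Call sites in the runtime pair traceAcquire and traceRelease around event
// emission, calling traceRelease only when ok() reported true (an invalid
// traceLocker never entered the seqlock critical section). Illustrative sketch,
// with GoUnpark standing in for whatever event the call site emits:
//
//	tl := traceAcquire()
//	if tl.ok() {
//		tl.GoUnpark(gp, 0)
//		traceRelease(tl)
//	}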

// traceExitingSyscall marks a goroutine as exiting the syscall slow path.
//
// Must be paired with a traceExitedSyscall call.
func traceExitingSyscall() {
	trace.exitingSyscall.Add(1)
}

// traceExitedSyscall marks a goroutine as having exited the syscall slow path.
func traceExitedSyscall() {
	trace.exitingSyscall.Add(-1)
}

// Gomaxprocs emits a ProcsChange event.
func (tl traceLocker) Gomaxprocs(procs int32) {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvProcsChange, traceArg(procs), tl.stack(1))
}

// ProcStart traces a ProcStart event.
//
// Must be called with a valid P.
func (tl traceLocker) ProcStart() {
	pp := tl.mp.p.ptr()
	// Procs are typically started within the scheduler when there is no user goroutine. If there is a user goroutine,
	// it must be in _Gsyscall because the only time a goroutine is allowed to have its Proc moved around from under it
	// is during a syscall.
	tl.eventWriter(traceGoSyscall, traceProcIdle).commit(traceEvProcStart, traceArg(pp.id), pp.trace.nextSeq(tl.gen))
}

// ProcStop traces a ProcStop event.
func (tl traceLocker) ProcStop(pp *p) {
	// The only time a goroutine is allowed to have its Proc moved around
	// from under it is during a syscall.
	tl.eventWriter(traceGoSyscall, traceProcRunning).commit(traceEvProcStop)
}

// GCActive traces a GCActive event.
//
// Must be emitted by an actively running goroutine on an active P. This restriction can be changed
// easily and only depends on where it's currently called.
func (tl traceLocker) GCActive() {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCActive, traceArg(trace.seqGC))
	// N.B. Only one GC can be running at a time, so this is naturally
	// serialized by the caller.
	trace.seqGC++
}

// GCStart traces a GCBegin event.
//
// Must be emitted by an actively running goroutine on an active P. This restriction can be changed
// easily and only depends on where it's currently called.
func (tl traceLocker) GCStart() {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCBegin, traceArg(trace.seqGC), tl.stack(3))
	// N.B. Only one GC can be running at a time, so this is naturally
	// serialized by the caller.
	trace.seqGC++
}

// GCDone traces a GCEnd event.
//
// Must be emitted by an actively running goroutine on an active P. This restriction can be changed
// easily and only depends on where it's currently called.
func (tl traceLocker) GCDone() {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCEnd, traceArg(trace.seqGC))
	// N.B. Only one GC can be running at a time, so this is naturally
	// serialized by the caller.
	trace.seqGC++
}

// STWStart traces a STWBegin event.
func (tl traceLocker) STWStart(reason stwReason) {
	// Although the current P may be in _Pgcstop here, we model the P as running during the STW. This deviates from the
	// runtime's state tracking, but it's more accurate and doesn't result in any loss of information.
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSTWBegin, tl.string(reason.String()), tl.stack(2))
}

// STWDone traces a STWEnd event.
func (tl traceLocker) STWDone() {
	// Although the current P may be in _Pgcstop here, we model the P as running during the STW. This deviates from the
	// runtime's state tracking, but it's more accurate and doesn't result in any loss of information.
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSTWEnd)
}

// GCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called.
//
// GCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
//
// Must be called with a valid P.
func (tl traceLocker) GCSweepStart() {
	// Delay the actual GCSweepBegin event until the first span
	// sweep. If we don't sweep anything, don't emit any events.
	pp := tl.mp.p.ptr()
	if pp.trace.maySweep {
		throw("double traceGCSweepStart")
	}
	pp.trace.maySweep, pp.trace.swept, pp.trace.reclaimed = true, 0, 0
}

// GCSweepSpan traces the sweep of a single span. If this is
// the first span swept since traceGCSweepStart was called, this
// will emit a GCSweepBegin event.
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
//
// Must be called with a valid P.
func (tl traceLocker) GCSweepSpan(bytesSwept uintptr) {
	pp := tl.mp.p.ptr()
	if pp.trace.maySweep {
		if pp.trace.swept == 0 {
			tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCSweepBegin, tl.stack(1))
			pp.trace.inSweep = true
		}
		pp.trace.swept += bytesSwept
	}
}

// GCSweepDone finishes tracing a sweep loop. If any memory was
// swept (i.e. traceGCSweepSpan emitted an event) then this will emit
// a GCSweepEnd event.
//
// Must be called with a valid P.
func (tl traceLocker) GCSweepDone() {
	pp := tl.mp.p.ptr()
	if !pp.trace.maySweep {
		throw("missing traceGCSweepStart")
	}
	if pp.trace.inSweep {
		tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCSweepEnd, traceArg(pp.trace.swept), traceArg(pp.trace.reclaimed))
		pp.trace.inSweep = false
	}
	pp.trace.maySweep = false
}
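
// The three sweep hooks above are meant to be used as a unit within a single
// sweep loop. Illustrative sketch of the intended sequence, where spanBytes is
// a hypothetical local holding the number of bytes swept for one span:
//
//	tl := traceAcquire()
//	if tl.ok() {
//		tl.GCSweepStart() // no event yet; just arms maySweep
//		traceRelease(tl)
//	}
//	// ... for each span swept in the loop:
//	tl = traceAcquire()
//	if tl.ok() {
//		tl.GCSweepSpan(spanBytes) // the first call emits GCSweepBegin
//		traceRelease(tl)
//	}
//	// ... once the loop is done:
//	tl = traceAcquire()
//	if tl.ok() {
//		tl.GCSweepDone() // emits GCSweepEnd only if something was swept
//		traceRelease(tl)
//	}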

// GCMarkAssistStart emits a MarkAssistBegin event.
func (tl traceLocker) GCMarkAssistStart() {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCMarkAssistBegin, tl.stack(1))
}

// GCMarkAssistDone emits a MarkAssistEnd event.
func (tl traceLocker) GCMarkAssistDone() {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCMarkAssistEnd)
}

// GoCreate emits a GoCreate event.
func (tl traceLocker) GoCreate(newg *g, pc uintptr) {
	newg.trace.setStatusTraced(tl.gen)
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoCreate, traceArg(newg.goid), tl.startPC(pc), tl.stack(2))
}

// GoStart emits a GoStart event.
//
// Must be called with a valid P.
func (tl traceLocker) GoStart() {
	gp := getg().m.curg
	pp := gp.m.p
	w := tl.eventWriter(traceGoRunnable, traceProcRunning)
	w = w.write(traceEvGoStart, traceArg(gp.goid), gp.trace.nextSeq(tl.gen))
	if pp.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
		w = w.write(traceEvGoLabel, trace.markWorkerLabels[tl.gen%2][pp.ptr().gcMarkWorkerMode])
	}
	w.end()
}

// GoEnd emits a GoDestroy event.
//
// TODO(mknyszek): Rename this to GoDestroy.
func (tl traceLocker) GoEnd() {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoDestroy)
}

// GoSched emits a GoStop event with a GoSched reason.
func (tl traceLocker) GoSched() {
	tl.GoStop(traceGoStopGoSched)
}

// GoPreempt emits a GoStop event with a GoPreempted reason.
func (tl traceLocker) GoPreempt() {
	tl.GoStop(traceGoStopPreempted)
}

// GoStop emits a GoStop event with the provided reason.
func (tl traceLocker) GoStop(reason traceGoStopReason) {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoStop, traceArg(trace.goStopReasons[tl.gen%2][reason]), tl.stack(1))
}

// GoPark emits a GoBlock event with the provided reason.
//
// TODO(mknyszek): Replace traceBlockReason with waitReason. It's silly
// that we have both, and waitReason is way more descriptive.
func (tl traceLocker) GoPark(reason traceBlockReason, skip int) {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoBlock, traceArg(trace.goBlockReasons[tl.gen%2][reason]), tl.stack(skip))
}

// GoUnpark emits a GoUnblock event.
func (tl traceLocker) GoUnpark(gp *g, skip int) {
	// Emit a GoWaiting status if necessary for the unblocked goroutine.
	w := tl.eventWriter(traceGoRunning, traceProcRunning)
	if !gp.trace.statusWasTraced(tl.gen) && gp.trace.acquireStatus(tl.gen) {
		// Careful: don't use the event writer. We never want status or in-progress events
		// to trigger more in-progress events.
		w.w = w.w.writeGoStatus(gp.goid, -1, traceGoWaiting, gp.inMarkAssist)
	}
	w.commit(traceEvGoUnblock, traceArg(gp.goid), gp.trace.nextSeq(tl.gen), tl.stack(skip))
}

// GoSysCall emits a GoSyscallBegin event.
//
// Must be called with a valid P.
func (tl traceLocker) GoSysCall() {
	var skip int
	switch {
	case tracefpunwindoff():
		// Unwind by skipping 1 frame relative to gp.syscallsp which is captured 3
		// frames above this frame. For frame pointer unwinding we avoid the same
		// problem by hard-coding the number of frames between our caller and the
		// actual syscall; see the cases below.
		// TODO(felixge): Implement gp.syscallbp to avoid this workaround?
		skip = 1
	case GOOS == "solaris" || GOOS == "illumos":
		// These platforms don't use a libc_read_trampoline.
		skip = 3
	default:
		// Skip the extra trampoline frame used on most systems.
		skip = 4
	}
	// Scribble down the M that the P is currently attached to.
	pp := tl.mp.p.ptr()
	pp.trace.mSyscallID = int64(tl.mp.procid)
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoSyscallBegin, pp.trace.nextSeq(tl.gen), tl.stack(skip))
}

// GoSysExit emits a GoSyscallEnd event, possibly along with a GoSyscallBlocked event
// if lostP is true.
//
// lostP must be true in all cases that a goroutine loses its P during a syscall.
// This means it's not sufficient to check if it has no P. In particular, it needs to be
// true in the following cases:
// - The goroutine lost its P, it ran some other code, and then got it back. It's now running with that P.
// - The goroutine lost its P and was unable to reacquire it, and is now running without a P.
// - The goroutine lost its P and acquired a different one, and is now running with that P.
func (tl traceLocker) GoSysExit(lostP bool) {
	ev := traceEvGoSyscallEnd
	procStatus := traceProcSyscall // Procs implicitly enter traceProcSyscall on GoSyscallBegin.
	if lostP {
		ev = traceEvGoSyscallEndBlocked
		procStatus = traceProcRunning // If a G has a P when emitting this event, it reacquired a P and is indeed running.
	} else {
		tl.mp.p.ptr().trace.mSyscallID = -1
	}
	tl.eventWriter(traceGoSyscall, procStatus).commit(ev)
}

// ProcSteal indicates that our current M stole a P from another M.
//
// inSyscall indicates that we're stealing the P from a syscall context.
//
// The caller must have ownership of pp.
func (tl traceLocker) ProcSteal(pp *p, inSyscall bool) {
	// Grab the M ID we stole from.
	mStolenFrom := pp.trace.mSyscallID
	pp.trace.mSyscallID = -1

	// The status of the proc and goroutine, if we need to emit one here, is not evident from the
	// context of just emitting this event alone. There are two cases. Either we're trying to steal
	// the P just to get its attention (e.g. STW or sysmon retake) or we're trying to steal a P for
	// ourselves specifically to keep running. The two contexts look different, but can be summarized
	// fairly succinctly. In the former, we're a regular running goroutine and proc, if we have either.
	// In the latter, we're a goroutine in a syscall.
	goStatus := traceGoRunning
	procStatus := traceProcRunning
	if inSyscall {
		goStatus = traceGoSyscall
		procStatus = traceProcSyscallAbandoned
	}
	w := tl.eventWriter(goStatus, procStatus)

	// Emit the status of the P we're stealing. We may have *just* done this when creating the event
	// writer but it's not guaranteed, even if inSyscall is true. Although it might seem like from a
	// syscall context we're always stealing a P for ourselves, we may have not wired it up yet (so
	// it wouldn't be visible to eventWriter) or we may not even intend to wire it up to ourselves
	// at all (e.g. entersyscall_gcwait).
	if !pp.trace.statusWasTraced(tl.gen) && pp.trace.acquireStatus(tl.gen) {
		// Careful: don't use the event writer. We never want status or in-progress events
		// to trigger more in-progress events.
		w.w = w.w.writeProcStatus(uint64(pp.id), traceProcSyscallAbandoned, pp.trace.inSweep)
	}
	w.commit(traceEvProcSteal, traceArg(pp.id), pp.trace.nextSeq(tl.gen), traceArg(mStolenFrom))
}

// GoSysBlock is a no-op in the new tracer.
func (tl traceLocker) GoSysBlock(pp *p) {
}

// HeapAlloc emits a HeapAlloc event.
func (tl traceLocker) HeapAlloc(live uint64) {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapAlloc, traceArg(live))
}

// HeapGoal reads the current heap goal and emits a HeapGoal event.
func (tl traceLocker) HeapGoal() {
	heapGoal := gcController.heapGoal()
	if heapGoal == ^uint64(0) {
		// Heap-based triggering is disabled.
		heapGoal = 0
	}
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapGoal, traceArg(heapGoal))
}

// OneNewExtraM is a no-op in the new tracer. This is worth keeping around though because
// it's a good place to insert a thread-level event about the new extra M.
func (tl traceLocker) OneNewExtraM(_ *g) {
}

// GoCreateSyscall indicates that a goroutine has transitioned from dead to GoSyscall.
//
// Unlike GoCreate, the caller must be running on gp.
//
// This occurs when C code calls into Go. On pthread platforms it occurs only when
// a C thread calls into Go code for the first time.
func (tl traceLocker) GoCreateSyscall(gp *g) {
	// N.B. We should never trace a status for this goroutine (which we're currently running on),
	// since we want this to appear like goroutine creation.
	gp.trace.setStatusTraced(tl.gen)
	tl.eventWriter(traceGoBad, traceProcBad).commit(traceEvGoCreateSyscall, traceArg(gp.goid))
}

// GoDestroySyscall indicates that a goroutine has transitioned from GoSyscall to dead.
//
// Must not have a P.
//
// This occurs when Go code returns back to C. On pthread platforms it occurs only when
// the C thread is destroyed.
func (tl traceLocker) GoDestroySyscall() {
	// N.B. If we trace a status here, we must never have a P, and we must be on a goroutine
	// that is in the syscall state.
	tl.eventWriter(traceGoSyscall, traceProcBad).commit(traceEvGoDestroySyscall)
}

// The functions below are exposed to the runtime/trace package via go:linkname.
// See runtime/trace/annotation.go.

// trace_userTaskCreate emits a UserTaskCreate event.
//
//go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
func trace_userTaskCreate(id, parentID uint64, taskType string) {
	tl := traceAcquire()
	if !tl.ok() {
		// Need to do this check because the caller won't have it.
		return
	}
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserTaskBegin, traceArg(id), traceArg(parentID), tl.string(taskType), tl.stack(3))
	traceRelease(tl)
}

// trace_userTaskEnd emits a UserTaskEnd event.
//
//go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
func trace_userTaskEnd(id uint64) {
	tl := traceAcquire()
	if !tl.ok() {
		// Need to do this check because the caller won't have it.
		return
	}
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserTaskEnd, traceArg(id), tl.stack(2))
	traceRelease(tl)
}

// trace_userRegion emits a UserRegionBegin or UserRegionEnd event,
// depending on mode (0 == Begin, 1 == End).
//
// TODO(mknyszek): Just make this two functions.
//
//go:linkname trace_userRegion runtime/trace.userRegion
func trace_userRegion(id, mode uint64, name string) {
	tl := traceAcquire()
	if !tl.ok() {
		// Need to do this check because the caller won't have it.
		return
	}
	var ev traceEv
	switch mode {
	case 0:
		ev = traceEvUserRegionBegin
	case 1:
		ev = traceEvUserRegionEnd
	default:
		return
	}
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(ev, traceArg(id), tl.string(name), tl.stack(3))
	traceRelease(tl)
}

// trace_userLog emits a UserLog event.
//
//go:linkname trace_userLog runtime/trace.userLog
func trace_userLog(id uint64, category, message string) {
	tl := traceAcquire()
	if !tl.ok() {
		// Need to do this check because the caller won't have it.
		return
	}
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserLog, traceArg(id), tl.string(category), tl.uniqueString(message), tl.stack(3))
	traceRelease(tl)
}
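
// On the runtime/trace side, these hooks correspond to body-less declarations
// whose symbols the go:linkname directives above bind to the functions in this
// file. A sketch of the consuming declarations (the authoritative versions live
// in runtime/trace/annotation.go; parameter names here are illustrative):
//
//	// package trace (i.e. runtime/trace)
//	func userTaskCreate(id, parentID uint64, taskType string)
//	func userTaskEnd(id uint64)
//	func userRegion(id, mode uint64, name string)
//	func userLog(id uint64, category, message string)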

// traceProcFree is called when a P is destroyed.
//
// This must run on the system stack to match the old tracer.
//
//go:systemstack
func traceProcFree(_ *p) {
}

// traceThreadDestroy is called when a thread is removed from
// sched.freem.
//
// mp must not be able to emit trace events anymore.
//
// sched.lock must be held to synchronize with traceAdvance.
func traceThreadDestroy(mp *m) {
	assertLockHeld(&sched.lock)

	// Flush all outstanding buffers to maintain the invariant
	// that an M only has active buffers while on sched.freem
	// or allm.
	//
	// Perform a traceAcquire/traceRelease on behalf of mp to
	// synchronize with the tracer trying to flush our buffer
	// as well.
	seq := mp.trace.seqlock.Add(1)
	if debugTraceReentrancy && seq%2 != 1 {
		throw("bad use of trace.seqlock or tracer is reentrant")
	}
	systemstack(func() {
		lock(&trace.lock)
		for i := range mp.trace.buf {
			if mp.trace.buf[i] != nil {
				// N.B. traceBufFlush accepts a generation, but it
				// really just cares about gen%2.
				traceBufFlush(mp.trace.buf[i], uintptr(i))
				mp.trace.buf[i] = nil
			}
		}
		unlock(&trace.lock)
	})
	seq1 := mp.trace.seqlock.Add(1)
	if seq1 != seq+1 {
		print("runtime: seq1=", seq1, "\n")
		throw("bad use of trace.seqlock")
	}
}

// Not used in the new tracer; solely for compatibility with the old tracer.
// nosplit because it's called from exitsyscall without a P.
//
//go:nosplit
func (_ traceLocker) RecordSyscallExitedTime(_ *g, _ *p) {
}