path: root/src/runtime/malloc.go
author     Martin Möhrmann <moehrmann@google.com>    2020-09-14 16:55:34 +0200
committer  Martin Möhrmann <moehrmann@google.com>    2020-10-14 05:34:32 +0000
commit     7c58ef732efd9bf0d0882bb95371ce1909924a75
tree       f2e241a40bc5ac1ee70aabd31430fb03cfbc7f73 /src/runtime/malloc.go
parent     f46a5b1e4559191363dbd4f510105dd31ae97aaa
runtime: implement GODEBUG=inittrace=1 support
Setting inittrace=1 causes the runtime to emit a single line to standard
error for each package with init work, summarizing the execution time and
memory allocation.

The emitted debug information for init functions can be used to find
bottlenecks or regressions in Go startup performance.

Packages with no init function work (user defined or compiler generated)
are omitted.

Tracing plugin inits is not supported as they can execute concurrently.
This would make the implementation of tracing more complex while adding
support for a very rare use case. Plugin inits can be traced separately
by testing a main package that explicitly imports the plugin package's
imports.

$ GODEBUG=inittrace=1 go test
init internal/bytealg @0.008 ms, 0 ms clock, 0 bytes, 0 allocs
init runtime @0.059 ms, 0.026 ms clock, 0 bytes, 0 allocs
init math @0.19 ms, 0.001 ms clock, 0 bytes, 0 allocs
init errors @0.22 ms, 0.004 ms clock, 0 bytes, 0 allocs
init strconv @0.24 ms, 0.002 ms clock, 32 bytes, 2 allocs
init sync @0.28 ms, 0.003 ms clock, 16 bytes, 1 allocs
init unicode @0.44 ms, 0.11 ms clock, 23328 bytes, 24 allocs
...

Inspired by stapelberg@google.com who instrumented doInit
in a prototype to measure init times with GDB.

Fixes #41378

Change-Id: Ic37c6a0cfc95488de9e737f5e346b8dbb39174e1
Reviewed-on: https://go-review.googlesource.com/c/go/+/254659
Trust: Martin Möhrmann <moehrmann@google.com>
Run-TryBot: Martin Möhrmann <moehrmann@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
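For readers who want to try the flag, a minimal example (not part of this commit;
the file name, package contents, and binary name below are made up): a main
package whose package-level variable and init function do a little work, plus
the commands that trace it.

// main.go
package main

import "fmt"

// Package-level variable initialization also counts as init work.
var table = buildTable()

func buildTable() map[int]string {
	m := make(map[int]string, 3)
	for i := 0; i < 3; i++ {
		m[i] = fmt.Sprintf("entry-%d", i)
	}
	return m
}

func init() {
	// Work done here is attributed to package main's trace line.
	_ = len(table)
}

func main() {
	fmt.Println(len(table))
}

$ go build -o initdemo .
$ GODEBUG=inittrace=1 ./initdemo

Each package that performed init work prints one line in the same format as the
sample output in the commit message above.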
Diffstat (limited to 'src/runtime/malloc.go')
-rw-r--r--   src/runtime/malloc.go   56
1 file changed, 35 insertions, 21 deletions
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index f7e9b7c4b4..b19d1f2671 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -909,27 +909,34 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 		return unsafe.Pointer(&zerobase)
 	}
 
-	if debug.sbrk != 0 {
-		align := uintptr(16)
-		if typ != nil {
-			// TODO(austin): This should be just
-			//   align = uintptr(typ.align)
-			// but that's only 4 on 32-bit platforms,
-			// even if there's a uint64 field in typ (see #599).
-			// This causes 64-bit atomic accesses to panic.
-			// Hence, we use stricter alignment that matches
-			// the normal allocator better.
-			if size&7 == 0 {
-				align = 8
-			} else if size&3 == 0 {
-				align = 4
-			} else if size&1 == 0 {
-				align = 2
-			} else {
-				align = 1
+	if debug.malloc {
+		if debug.sbrk != 0 {
+			align := uintptr(16)
+			if typ != nil {
+				// TODO(austin): This should be just
+				//   align = uintptr(typ.align)
+				// but that's only 4 on 32-bit platforms,
+				// even if there's a uint64 field in typ (see #599).
+				// This causes 64-bit atomic accesses to panic.
+				// Hence, we use stricter alignment that matches
+				// the normal allocator better.
+				if size&7 == 0 {
+					align = 8
+				} else if size&3 == 0 {
+					align = 4
+				} else if size&1 == 0 {
+					align = 2
+				} else {
+					align = 1
+				}
 			}
+			return persistentalloc(size, align, &memstats.other_sys)
+		}
+
+		if inittrace.active && inittrace.id == getg().goid {
+			// Init functions are executed sequentially in a single Go routine.
+			inittrace.allocs += 1
 		}
-		return persistentalloc(size, align, &memstats.other_sys)
 	}
 
 	// assistG is the G to charge for this allocation, or nil if
@@ -1136,8 +1143,15 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	mp.mallocing = 0
 	releasem(mp)
 
-	if debug.allocfreetrace != 0 {
-		tracealloc(x, size, typ)
+	if debug.malloc {
+		if debug.allocfreetrace != 0 {
+			tracealloc(x, size, typ)
+		}
+
+		if inittrace.active && inittrace.id == getg().goid {
+			// Init functions are executed sequentially in a single Go routine.
+			inittrace.bytes += uint64(size)
+		}
 	}
 
 	if rate := MemProfileRate; rate > 0 {
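Both hunks follow the same pattern: the per-allocation debug checks are grouped
under a single debug.malloc test, and the inittrace counters are bumped only
when the allocating goroutine is the one running package inits, matched by
goroutine id. As the added comments note, init functions run sequentially on a
single goroutine, so plain (non-atomic) counter updates suffice. The standalone
sketch below illustrates that id-gated counting pattern; it is not runtime
code, and the tracker, record, and initGoid names are invented for this
example.

package main

import (
	"fmt"
	"sync"
)

// tracker mirrors the shape of the bookkeeping in the diff: an active flag,
// the id of the single goroutine whose work is counted, and two counters.
type tracker struct {
	active bool
	id     int64
	allocs uint64
	bytes  uint64
}

var trace tracker

// record is the analogue of the lines added to mallocgc: it updates the
// counters only when tracing is active and the caller is the traced
// goroutine, so the counters need no locks or atomics.
func record(goid int64, size uint64) {
	if trace.active && trace.id == goid {
		trace.allocs += 1
		trace.bytes += size
	}
}

func main() {
	const initGoid = 1 // toy id; the runtime compares against the real goroutine id
	trace = tracker{active: true, id: initGoid}

	// "Init work" happens sequentially on the traced goroutine.
	for i := 0; i < 4; i++ {
		record(initGoid, 32)
	}

	// Work attributed to any other goroutine is ignored by the id check.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		record(2, 1024)
	}()
	wg.Wait()

	trace.active = false
	fmt.Printf("init: %d allocs, %d bytes\n", trace.allocs, trace.bytes)
}

Run as-is, the sketch prints "init: 4 allocs, 128 bytes": only calls tagged
with the traced id are counted, mirroring how the diff ignores allocations made
outside the init goroutine.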