path: root/src/runtime/export_test.go
author	Michael Anthony Knyszek <mknyszek@google.com>	2020-08-04 17:29:03 +0000
committer	Michael Knyszek <mknyszek@google.com>	2020-10-26 18:28:56 +0000
commit	79781e8dd382ac34e502ed6a088dff6860a08c05 (patch)
tree	37ad5dea76bd2dabfb360c05e6e9f040220a9739 /src/runtime/export_test.go
parent	f77a9025f1e4bf4bb3e2b582d13cce5f19c1ca51 (diff)
download	go-79781e8dd382ac34e502ed6a088dff6860a08c05.tar.gz
	go-79781e8dd382ac34e502ed6a088dff6860a08c05.zip
runtime: move malloc stats into consistentHeapStats
This change moves the mcache-local malloc stats into the
consistentHeapStats structure so the malloc stats can be managed
consistently with the memory stats. The one exception here is
tinyAllocs: moving it into the global stats would incur several atomic
writes on the fast path, and microbenchmarks for just one CPU core
have shown a 50% loss in throughput. Since the tiny allocation count
isn't exposed anyway and is always blindly added to both allocs and
frees, let that stay inconsistent and flush the tiny allocation count
every so often.

Change-Id: I2a4b75f209c0e659b9c0db081a3287bf227c10ca
Reviewed-on: https://go-review.googlesource.com/c/go/+/247039
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Trust: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
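To make the trade-off described above concrete, here is a minimal,
self-contained sketch of the pattern, not the runtime's actual code:
localCache and globalTinyAllocs are hypothetical stand-ins for the
mcache and the global statistic. The tiny-allocation count is bumped
with a plain, non-atomic increment on the fast path and folded into
the globally visible statistic only at flush points.

package main

import (
	"fmt"
	"sync/atomic"
)

// globalTinyAllocs stands in for the globally visible statistic. It
// is only touched at flush time, never on the allocation fast path.
var globalTinyAllocs uint64

// localCache stands in for a per-P mcache: its counter is a plain
// uint64 written without atomics, which keeps the fast path cheap.
type localCache struct {
	tinyAllocCount uint64
}

// allocTiny models the fast path: one non-atomic increment.
func (c *localCache) allocTiny() {
	c.tinyAllocCount++
}

// flush folds the local count into the global stat with a single
// atomic add, paying the synchronization cost once per flush rather
// than once per allocation.
func (c *localCache) flush() {
	atomic.AddUint64(&globalTinyAllocs, c.tinyAllocCount)
	c.tinyAllocCount = 0
}

func main() {
	c := new(localCache)
	for i := 0; i < 1000; i++ {
		c.allocTiny()
	}
	c.flush()
	fmt.Println(atomic.LoadUint64(&globalTinyAllocs)) // prints 1000
}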
Diffstat (limited to 'src/runtime/export_test.go')
-rw-r--r--	src/runtime/export_test.go	37
1 file changed, 13 insertions(+), 24 deletions(-)
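The diff below replaces a walk over every P's mcache with a single
snapshot read of the consistent stats. Here is a rough sketch of the
resulting aggregation, assuming a made-up statsDelta type and
size-class table that stand in for the runtime's heapStatsDelta and
class_to_size:

package main

import "fmt"

// statsDelta is a hypothetical stand-in for heapStatsDelta, holding
// just the fields the new code reads.
type statsDelta struct {
	smallFreeCount [3]uint64 // frees per size class
	largeFreeCount uint64    // count of large-object frees
	largeFree      uint64    // bytes freed in large objects
}

// classToSize is a made-up size-class table (class_to_size in the runtime).
var classToSize = [3]uint64{8, 16, 32}

func main() {
	// One consistent snapshot, as if read in a single call.
	m := statsDelta{
		smallFreeCount: [3]uint64{4, 2, 1},
		largeFreeCount: 1,
		largeFree:      4096,
	}

	// Sum per-size-class frees and the bytes they account for.
	var frees, smallFree uint64
	for i := range m.smallFreeCount {
		frees += m.smallFreeCount[i]
		smallFree += m.smallFreeCount[i] * classToSize[i]
	}
	// Large-object frees are tracked separately and added once.
	frees += m.largeFreeCount

	fmt.Println("frees:", frees)                       // frees: 8
	fmt.Println("freed bytes:", smallFree+m.largeFree) // freed bytes: 4192
}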
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index cb753ee819..ff901fd7be 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -337,33 +337,22 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 		}
 	}
 
-	// Add in frees. readmemstats_m flushed the cached stats, so
-	// these are up-to-date.
-	var tinyAllocs, largeFree, smallFree uint64
-	for _, p := range allp {
-		c := p.mcache
-		if c == nil {
-			continue
-		}
-		// Collect large allocation stats.
-		largeFree += uint64(c.largeFree)
-		slow.Frees += uint64(c.largeFreeCount)
-
-		// Collect tiny allocation stats.
-		tinyAllocs += uint64(c.tinyAllocCount)
-
-		// Collect per-sizeclass stats.
-		for i := 0; i < _NumSizeClasses; i++ {
-			slow.Frees += uint64(c.smallFreeCount[i])
-			bySize[i].Frees += uint64(c.smallFreeCount[i])
-			bySize[i].Mallocs += uint64(c.smallFreeCount[i])
-			smallFree += uint64(c.smallFreeCount[i]) * uint64(class_to_size[i])
-		}
+	// Add in frees by just reading the stats for those directly.
+	var m heapStatsDelta
+	memstats.heapStats.unsafeRead(&m)
+
+	// Collect per-sizeclass free stats.
+	var smallFree uint64
+	for i := 0; i < _NumSizeClasses; i++ {
+		slow.Frees += uint64(m.smallFreeCount[i])
+		bySize[i].Frees += uint64(m.smallFreeCount[i])
+		bySize[i].Mallocs += uint64(m.smallFreeCount[i])
+		smallFree += uint64(m.smallFreeCount[i]) * uint64(class_to_size[i])
 	}
-	slow.Frees += tinyAllocs
+	slow.Frees += memstats.tinyallocs + uint64(m.largeFreeCount)
 	slow.Mallocs += slow.Frees
-	slow.TotalAlloc = slow.Alloc + largeFree + smallFree
+	slow.TotalAlloc = slow.Alloc + uint64(m.largeFree) + smallFree
 	for i := range slow.BySize {
 		slow.BySize[i].Mallocs = bySize[i].Mallocs