Diffstat (limited to 'src/runtime/mcache.go')
-rw-r--r--  src/runtime/mcache.go | 156
1 file changed, 144 insertions(+), 12 deletions(-)
diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go
index 7a7d33ccae..c9342a41c9 100644
--- a/src/runtime/mcache.go
+++ b/src/runtime/mcache.go
@@ -10,6 +10,7 @@ import (
)
// Per-thread (in Go, per-P) cache for small objects.
+// This includes a small object cache and local allocation stats.
// No locking needed because it is per-thread (per-P).
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
@@ -19,8 +20,8 @@ import (
type mcache struct {
// The following members are accessed on every malloc,
// so they are grouped here for better caching.
- next_sample uintptr // trigger heap sample after allocating this many bytes
- local_scan uintptr // bytes of scannable heap allocated
+ nextSample uintptr // trigger heap sample after allocating this many bytes
+ scanAlloc uintptr // bytes of scannable heap allocated
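
The renamed nextSample field above drives heap profiling: each malloc counts allocated bytes down against nextSample, records a profile sample when the threshold is crossed, and then draws a new threshold. Below is a minimal, self-contained sketch of that byte-countdown idea; the sampler type and its rate/alloc names are illustrative, and the runtime draws the next threshold from an exponential distribution keyed to MemProfileRate rather than the uniform draw used here.

package main

import (
	"fmt"
	"math/rand"
)

// sampler decides, per allocation, whether a heap-profile sample should be
// taken by counting bytes down toward the next sample point.
type sampler struct {
	rate       int // mean bytes between samples (cf. runtime.MemProfileRate)
	nextSample int // bytes left until the next sample is taken
}

// reset draws a new threshold. A uniform draw keeps the sketch short; the
// runtime uses an exponential distribution so sampling stays unbiased.
func (s *sampler) reset() {
	s.nextSample = rand.Intn(2*s.rate + 1)
}

// alloc reports whether an allocation of size bytes should be sampled.
func (s *sampler) alloc(size int) bool {
	s.nextSample -= size
	if s.nextSample < 0 {
		s.reset()
		return true
	}
	return false
}

func main() {
	s := &sampler{rate: 512 * 1024} // the default MemProfileRate
	s.reset()
	sampled := 0
	for i := 0; i < 100000; i++ {
		if s.alloc(4096) {
			sampled++
		}
	}
	fmt.Println("sampled allocations:", sampled)
}
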
// Allocator cache for tiny objects w/o pointers.
// See "Tiny allocator" comment in malloc.go.
@@ -31,9 +32,12 @@ type mcache struct {
// tiny is a heap pointer. Since mcache is in non-GC'd memory,
// we handle it by clearing it in releaseAll during mark
// termination.
- tiny uintptr
- tinyoffset uintptr
- local_tinyallocs uintptr // number of tiny allocs not counted in other stats
+ //
+ // tinyAllocs is the number of tiny allocations performed
+ // by the P that owns this mcache.
+ tiny uintptr
+ tinyoffset uintptr
+ tinyAllocs uintptr
// The rest is not accessed on every malloc.
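
tiny, tinyoffset, and the new tinyAllocs counter implement the tiny allocator described in malloc.go: pointer-free objects smaller than 16 bytes are packed into one block by bumping an offset, and tinyAllocs counts how many objects were handed out that way. The following self-contained sketch shows only that packing idea; tinyBlock and its fields are illustrative names, and the real allocator also derives alignment from the object size and starts a fresh block when the current one fills up.

package main

import "fmt"

const tinySize = 16 // the runtime's maxTinySize is 16 bytes

// tinyBlock packs small pointer-free allocations into one chunk by bumping
// an offset, the way mcache.tiny and tinyoffset do.
type tinyBlock struct {
	buf        [tinySize]byte
	offset     uintptr // next free byte in buf (cf. tinyoffset)
	tinyAllocs uintptr // allocations served from this block (cf. tinyAllocs)
}

// alloc returns an offset inside the block for a size-byte object, or false
// if it doesn't fit and the caller would need to start a new block.
func (b *tinyBlock) alloc(size, align uintptr) (uintptr, bool) {
	off := (b.offset + align - 1) &^ (align - 1) // round up to alignment
	if off+size > tinySize {
		return 0, false
	}
	b.offset = off + size
	b.tinyAllocs++
	return off, true
}

func main() {
	var b tinyBlock
	for _, sz := range []uintptr{3, 5, 2, 8} {
		off, ok := b.alloc(sz, 1)
		fmt.Println("size", sz, "offset", off, "fit", ok)
	}
	fmt.Println("tiny allocs:", b.tinyAllocs)
}
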
@@ -41,16 +45,15 @@ type mcache struct {
stackcache [_NumStackOrders]stackfreelist
- // Local allocator stats, flushed during GC.
- local_largefree uintptr // bytes freed for large objects (>maxsmallsize)
- local_nlargefree uintptr // number of frees for large objects (>maxsmallsize)
- local_nsmallfree [_NumSizeClasses]uintptr // number of frees for small objects (<=maxsmallsize)
-
// flushGen indicates the sweepgen during which this mcache
// was last flushed. If flushGen != mheap_.sweepgen, the spans
// in this mcache are stale and need to be flushed so they
// can be swept. This is done in acquirep.
flushGen uint32
+
+ // statsSeq is a counter indicating whether this P is currently
+ // writing any stats. Its value is even when not, odd when it is.
+ statsSeq uint32
}
// A gclink is a node in a linked list of blocks, like mlink,
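
The new statsSeq field is a sequence counter in the spirit of a seqlock: the owning P holds an odd value while it is writing its stats shard (see the memstats.heapStats.acquire/release calls later in this diff) and an even value otherwise, so a reader aggregating per-P stats can detect that it raced with a writer and retry. A self-contained sketch of that even/odd protocol follows; statsShard and its fields are illustrative, not the runtime's actual stats machinery.

package main

import (
	"fmt"
	"sync/atomic"
)

// statsShard holds one writer's statistics plus an even/odd sequence
// counter playing the role of mcache.statsSeq: even means no write is in
// progress, odd means one is.
type statsShard struct {
	seq   atomic.Uint32
	alloc atomic.Uint64 // example stat guarded by seq
}

// add brackets the update with two increments, so seq stays odd for the
// duration of the write.
func (s *statsShard) add(n uint64) {
	s.seq.Add(1) // now odd: writing
	s.alloc.Add(n)
	s.seq.Add(1) // now even: idle
}

// read retries until it observes the same even sequence number before and
// after reading, i.e. no write overlapped the read.
func (s *statsShard) read() uint64 {
	for {
		seq1 := s.seq.Load()
		if seq1%2 != 0 {
			continue // a writer is active; retry
		}
		v := s.alloc.Load()
		if s.seq.Load() == seq1 {
			return v
		}
	}
}

func main() {
	var s statsShard
	s.add(100)
	s.add(23)
	fmt.Println(s.read()) // 123
}
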
@@ -93,10 +96,16 @@ func allocmcache() *mcache {
for i := range c.alloc {
c.alloc[i] = &emptymspan
}
- c.next_sample = nextSample()
+ c.nextSample = nextSample()
return c
}
+// freemcache releases resources associated with this
+// mcache and puts the object onto a free list.
+//
+// In some cases there is no way to simply release
+// resources, such as statistics, so donate them to
+// a different mcache (the recipient).
func freemcache(c *mcache) {
systemstack(func() {
c.releaseAll()
@@ -108,12 +117,34 @@ func freemcache(c *mcache) {
// gcworkbuffree(c.gcworkbuf)
lock(&mheap_.lock)
- purgecachedstats(c)
mheap_.cachealloc.free(unsafe.Pointer(c))
unlock(&mheap_.lock)
})
}
+// getMCache is a convenience function which tries to obtain an mcache.
+//
+// Must be running with a P when called (so the caller must be in a
+// non-preemptible state) or must be called during bootstrapping.
+func getMCache() *mcache {
+ // Grab the mcache, since that's where stats live.
+ pp := getg().m.p.ptr()
+ var c *mcache
+ if pp == nil {
+ // We will be called without a P while bootstrapping,
+ // in which case we use mcache0, which is set in mallocinit.
+ // mcache0 is cleared when bootstrapping is complete,
+ // by procresize.
+ c = mcache0
+ if c == nil {
+ throw("getMCache called with no P or outside bootstrapping")
+ }
+ } else {
+ c = pp.mcache
+ }
+ return c
+}
+
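
getMCache prefers the current P's mcache and falls back to the global mcache0 only while the runtime is bootstrapping, before Ps exist; once procresize clears mcache0, calling it without a P is a fatal error. The sketch below mirrors just that fallback shape with illustrative names (cache, bootstrapCache, current are not runtime identifiers).

package main

import "fmt"

type cache struct{ name string }

// bootstrapCache stands in for mcache0: it exists only until per-worker
// state has been initialized, then is cleared.
var bootstrapCache = &cache{name: "bootstrap"}

// current stands in for getg().m.p.ptr().mcache; nil means "no P yet".
var current *cache

// getCache mirrors getMCache's shape: use the per-worker cache when there is
// one, otherwise fall back to the bootstrap cache, and treat having neither
// as a fatal error.
func getCache() *cache {
	c := current
	if c == nil {
		c = bootstrapCache
		if c == nil {
			panic("getCache called with no worker and outside bootstrapping")
		}
	}
	return c
}

func main() {
	fmt.Println(getCache().name) // "bootstrap" until initialization runs

	current = &cache{name: "per-worker"}
	bootstrapCache = nil // cleared once bootstrapping completes
	fmt.Println(getCache().name)
}
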
// refill acquires a new span of span class spc for c. This span will
// have at least one free object. The current span in c must be full.
//
@@ -148,13 +179,107 @@ func (c *mcache) refill(spc spanClass) {
// sweeping in the next sweep phase.
s.sweepgen = mheap_.sweepgen + 3
+ // Assume all objects from this span will be allocated in the
+ // mcache. If it gets uncached, we'll adjust this.
+ stats := memstats.heapStats.acquire(c)
+ atomic.Xadduintptr(&stats.smallAllocCount[spc.sizeclass()], uintptr(s.nelems)-uintptr(s.allocCount))
+ memstats.heapStats.release(c)
+
+ // Update heap_live with the same assumption.
+ usedBytes := uintptr(s.allocCount) * s.elemsize
+ atomic.Xadd64(&memstats.heap_live, int64(s.npages*pageSize)-int64(usedBytes))
+
+ // Flush tinyAllocs.
+ if spc == tinySpanClass {
+ atomic.Xadd64(&memstats.tinyallocs, int64(c.tinyAllocs))
+ c.tinyAllocs = 0
+ }
+
+ // While we're here, flush scanAlloc, since we have to call
+ // revise anyway.
+ atomic.Xadd64(&memstats.heap_scan, int64(c.scanAlloc))
+ c.scanAlloc = 0
+
+ if trace.enabled {
+ // heap_live changed.
+ traceHeapAlloc()
+ }
+ if gcBlackenEnabled != 0 {
+ // heap_live and heap_scan changed.
+ gcController.revise()
+ }
+
c.alloc[spc] = s
}
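
The new code in refill folds the per-P counters into the global statistics with atomic adds: tinyAllocs and scanAlloc are flushed into memstats, and heap_live is credited with the whole span on the assumption that every slot will be allocated (releaseAll later subtracts whatever went unused). The following self-contained sketch shows that flush-local-into-global pattern only; globalStats, localStats, and their fields are illustrative names.

package main

import (
	"fmt"
	"sync/atomic"
)

// globalStats stands in for the shared memstats fields that many Ps update.
type globalStats struct {
	tinyAllocs atomic.Int64
	heapScan   atomic.Int64
}

// localStats stands in for the per-P counters kept on the mcache, which only
// their owner touches and so need no atomics of their own.
type localStats struct {
	tinyAllocs int64
	scanAlloc  int64
}

// flush moves the local counters into the shared totals and zeroes them, the
// way refill and releaseAll flush tinyAllocs and scanAlloc.
func (l *localStats) flush(g *globalStats) {
	g.tinyAllocs.Add(l.tinyAllocs)
	l.tinyAllocs = 0
	g.heapScan.Add(l.scanAlloc)
	l.scanAlloc = 0
}

func main() {
	var g globalStats
	l := &localStats{tinyAllocs: 7, scanAlloc: 4096}
	l.flush(&g)
	fmt.Println(g.tinyAllocs.Load(), g.heapScan.Load()) // 7 4096
}
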
+// allocLarge allocates a span for a large object.
+func (c *mcache) allocLarge(size uintptr, needzero bool, noscan bool) *mspan {
+ if size+_PageSize < size {
+ throw("out of memory")
+ }
+ npages := size >> _PageShift
+ if size&_PageMask != 0 {
+ npages++
+ }
+
+ // Deduct credit for this span allocation and sweep if
+ // necessary. mHeap_Alloc will also sweep npages, so this only
+ // pays the debt down to npage pages.
+ deductSweepCredit(npages*_PageSize, npages)
+
+ spc := makeSpanClass(0, noscan)
+ s := mheap_.alloc(npages, spc, needzero)
+ if s == nil {
+ throw("out of memory")
+ }
+ stats := memstats.heapStats.acquire(c)
+ atomic.Xadduintptr(&stats.largeAlloc, npages*pageSize)
+ atomic.Xadduintptr(&stats.largeAllocCount, 1)
+ memstats.heapStats.release(c)
+
+ // Update heap_live and revise pacing if needed.
+ atomic.Xadd64(&memstats.heap_live, int64(npages*pageSize))
+ if trace.enabled {
+ // Trace that a heap alloc occurred because heap_live changed.
+ traceHeapAlloc()
+ }
+ if gcBlackenEnabled != 0 {
+ gcController.revise()
+ }
+
+ // Put the large span in the mcentral swept list so that it's
+ // visible to the background sweeper.
+ mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)
+ s.limit = s.base() + size
+ heapBitsForAddr(s.base()).initSpan(s)
+ return s
+}
+
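
allocLarge starts by converting the requested byte size into whole 8 KiB pages, rounding up and using the size+_PageSize < size comparison to catch overflow before it corrupts the page count. A self-contained sketch of that computation follows; pagesFor and the constant names are illustrative, though the 8 KiB page size matches the runtime's.

package main

import (
	"errors"
	"fmt"
)

const (
	pageShift = 13             // the runtime's pages are 8 KiB
	pageSize  = 1 << pageShift // 8192
	pageMask  = pageSize - 1
)

// pagesFor rounds size up to a whole number of pages, reporting an error if
// the rounding would overflow, mirroring the checks at the top of allocLarge.
func pagesFor(size uintptr) (uintptr, error) {
	if size+pageSize < size {
		return 0, errors.New("out of memory: size overflows when rounded to pages")
	}
	npages := size >> pageShift
	if size&pageMask != 0 {
		npages++ // a partial trailing page still needs a full page
	}
	return npages, nil
}

func main() {
	for _, sz := range []uintptr{1, pageSize, pageSize + 1, ^uintptr(0) - 10} {
		n, err := pagesFor(sz)
		fmt.Println(sz, n, err)
	}
}
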
func (c *mcache) releaseAll() {
+ // Take this opportunity to flush scanAlloc.
+ atomic.Xadd64(&memstats.heap_scan, int64(c.scanAlloc))
+ c.scanAlloc = 0
+
+ sg := mheap_.sweepgen
for i := range c.alloc {
s := c.alloc[i]
if s != &emptymspan {
+ // Adjust nsmallalloc in case the span wasn't fully allocated.
+ n := uintptr(s.nelems) - uintptr(s.allocCount)
+ stats := memstats.heapStats.acquire(c)
+ atomic.Xadduintptr(&stats.smallAllocCount[spanClass(i).sizeclass()], -n)
+ memstats.heapStats.release(c)
+ if s.sweepgen != sg+1 {
+ // refill conservatively counted unallocated slots in heap_live.
+ // Undo this.
+ //
+ // If this span was cached before sweep, then
+ // heap_live was totally recomputed since
+ // caching this span, so we don't do this for
+ // stale spans.
+ atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
+ }
+ // Release the span to the mcentral.
mheap_.central[i].mcentral.uncacheSpan(s)
c.alloc[i] = &emptymspan
}
@@ -162,6 +287,13 @@ func (c *mcache) releaseAll() {
// Clear tinyalloc pool.
c.tiny = 0
c.tinyoffset = 0
+ atomic.Xadd64(&memstats.tinyallocs, int64(c.tinyAllocs))
+ c.tinyAllocs = 0
+
+	// Updated heap_scan and possibly heap_live.
+ if gcBlackenEnabled != 0 {
+ gcController.revise()
+ }
}
// prepareForSweep flushes c if the system has entered a new sweep phase
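
The prepareForSweep function whose comment closes this diff works together with the flushGen field added earlier in the file: each mcache remembers the sweep generation it was last flushed in, and is flushed again only when mheap_.sweepgen has moved on. The sketch below shows just that generation check with illustrative names; it is not the runtime's implementation, which performs the comparison and flush from acquirep with the appropriate span handling.

package main

import (
	"fmt"
	"sync/atomic"
)

// globalSweepGen stands in for mheap_.sweepgen; it advances each GC cycle.
var globalSweepGen atomic.Uint32

// cache remembers the generation it was last flushed in, like mcache.flushGen.
type cache struct {
	flushGen atomic.Uint32
}

// prepareForSweep flushes the cache only if the global generation has moved
// on since the last flush, mirroring the check described in mcache.go.
func (c *cache) prepareForSweep() {
	sg := globalSweepGen.Load()
	if c.flushGen.Load() == sg {
		return // already flushed during this sweep generation
	}
	fmt.Println("flushing stale cache for sweepgen", sg)
	// ... releaseAll-style work would go here ...
	c.flushGen.Store(sg)
}

func main() {
	c := &cache{}
	c.prepareForSweep() // generations match: nothing to do

	globalSweepGen.Add(2) // a new GC cycle advances the global generation
	c.prepareForSweep()   // the cache is now stale: flush it
}
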