about summary refs log tree commit diff
path: root/src/runtime/mcache.go
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime/mcache.go')
-rw-r--r--  src/runtime/mcache.go  63
1 file changed, 28 insertions, 35 deletions
diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go
index 86a8958b9d..afd5afbddd 100644
--- a/src/runtime/mcache.go
+++ b/src/runtime/mcache.go
@@ -156,6 +156,25 @@ func (c *mcache) refill(spc spanClass) {
throw("bad sweepgen in refill")
}
mheap_.central[spc].mcentral.uncacheSpan(s)
+
+ // Count up how many slots were used and record it.
+ stats := memstats.heapStats.acquire()
+ slotsUsed := uintptr(s.allocCount) - uintptr(s.allocCountBeforeCache)
+ atomic.Xadduintptr(&stats.smallAllocCount[spc.sizeclass()], slotsUsed)
+
+ // Flush tinyAllocs.
+ if spc == tinySpanClass {
+ atomic.Xadduintptr(&stats.tinyAllocCount, c.tinyAllocs)
+ c.tinyAllocs = 0
+ }
+ memstats.heapStats.release()
+
+ // Update heapLive and flush scanAlloc.
+ gcController.update(int64(slotsUsed*s.elemsize), int64(c.scanAlloc))
+ c.scanAlloc = 0
+
+ // Clear the second allocCount just to be safe.
+ s.allocCountBeforeCache = 0
}
// Get a new cached span from the central lists.
@@ -172,24 +191,8 @@ func (c *mcache) refill(spc spanClass) {
// sweeping in the next sweep phase.
s.sweepgen = mheap_.sweepgen + 3
- // Assume all objects from this span will be allocated in the
- // mcache. If it gets uncached, we'll adjust this.
- stats := memstats.heapStats.acquire()
- atomic.Xadduintptr(&stats.smallAllocCount[spc.sizeclass()], uintptr(s.nelems)-uintptr(s.allocCount))
-
- // Flush tinyAllocs.
- if spc == tinySpanClass {
- atomic.Xadduintptr(&stats.tinyAllocCount, c.tinyAllocs)
- c.tinyAllocs = 0
- }
- memstats.heapStats.release()
-
- // Update heapLive with the same assumption.
- // While we're here, flush scanAlloc, since we have to call
- // revise anyway.
- usedBytes := uintptr(s.allocCount) * s.elemsize
- gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc))
- c.scanAlloc = 0
+ // Store the current alloc count for accounting later.
+ s.allocCountBeforeCache = s.allocCount
c.alloc[spc] = s
}
@@ -235,26 +238,16 @@ func (c *mcache) releaseAll() {
scanAlloc := int64(c.scanAlloc)
c.scanAlloc = 0
- sg := mheap_.sweepgen
- dHeapLive := int64(0)
for i := range c.alloc {
s := c.alloc[i]
if s != &emptymspan {
- // Adjust nsmallalloc in case the span wasn't fully allocated.
- n := uintptr(s.nelems) - uintptr(s.allocCount)
+ // Adjust smallAllocCount for whatever was allocated.
stats := memstats.heapStats.acquire()
- atomic.Xadduintptr(&stats.smallAllocCount[spanClass(i).sizeclass()], -n)
+ slotsUsed := uintptr(s.allocCount) - uintptr(s.allocCountBeforeCache)
+ atomic.Xadduintptr(&stats.smallAllocCount[spanClass(i).sizeclass()], slotsUsed)
memstats.heapStats.release()
- if s.sweepgen != sg+1 {
- // refill conservatively counted unallocated slots in gcController.heapLive.
- // Undo this.
- //
- // If this span was cached before sweep, then
- // gcController.heapLive was totally recomputed since
- // caching this span, so we don't do this for
- // stale spans.
- dHeapLive -= int64(n) * int64(s.elemsize)
- }
+ s.allocCountBeforeCache = 0
+
// Release the span to the mcentral.
mheap_.central[i].mcentral.uncacheSpan(s)
c.alloc[i] = &emptymspan
@@ -270,8 +263,8 @@ func (c *mcache) releaseAll() {
c.tinyAllocs = 0
memstats.heapStats.release()
- // Updated heapScan and heapLive.
- gcController.update(dHeapLive, scanAlloc)
+ // Updated heapScan.
+ gcController.update(0, scanAlloc)
}
// prepareForSweep flushes c if the system has entered a new sweep phase