Diffstat (limited to 'src/runtime/mheap.go')
-rw-r--r--  src/runtime/mheap.go  191
1 file changed, 93 insertions, 98 deletions
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 1a57bcd66e..14a73c0491 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -128,13 +128,6 @@ type mheap struct {
// This is accessed atomically.
reclaimCredit uintptr
- // Malloc stats.
- largealloc uint64 // bytes allocated for large objects
- nlargealloc uint64 // number of large object allocations
- largefree uint64 // bytes freed for large objects (>maxsmallsize)
- nlargefree uint64 // number of frees for large objects (>maxsmallsize)
- nsmallfree [_NumSizeClasses]uint64 // number of frees for small objects (<=maxsmallsize)
-
// arenas is the heap arena map. It points to the metadata for
// the heap for every arena frame of the entire usable virtual
// address space.
@@ -720,7 +713,7 @@ func (h *mheap) init() {
h.central[i].mcentral.init(spanClass(i))
}
- h.pages.init(&h.lock, &memstats.gc_sys)
+ h.pages.init(&h.lock, &memstats.gcMiscSys)
}
// reclaim sweeps and reclaims at least npage pages into the heap.
@@ -868,6 +861,22 @@ func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
return nFreed
}
+// spanAllocType represents the type of allocation to make, or
+// the type of allocation to be freed.
+type spanAllocType uint8
+
+const (
+ spanAllocHeap spanAllocType = iota // heap span
+ spanAllocStack // stack span
+ spanAllocPtrScalarBits // unrolled GC prog bitmap span
+ spanAllocWorkBuf // work buf span
+)
+
+// manual returns true if the span allocation is manually managed.
+func (s spanAllocType) manual() bool {
+ return s != spanAllocHeap
+}
+
// alloc allocates a new span of npage pages from the GC'd heap.
//
// spanclass indicates the span's size class and scannability.
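
A minimal standalone sketch of the spanAllocType introduced above, with a small demo of the manual() predicate. The type, constants, and method mirror the hunk; the package main scaffolding around them is illustrative only, not runtime code.

package main

import "fmt"

// spanAllocType and manual() mirror the additions in the hunk above: the
// type says what a span allocation is for, so accounting can be keyed off
// it instead of a bare "manual bool" plus caller-supplied stat pointers.
type spanAllocType uint8

const (
	spanAllocHeap          spanAllocType = iota // heap span
	spanAllocStack                              // stack span
	spanAllocPtrScalarBits                      // unrolled GC prog bitmap span
	spanAllocWorkBuf                            // work buf span
)

// manual returns true if the span allocation is manually managed,
// i.e. anything other than a GC'd heap span.
func (s spanAllocType) manual() bool {
	return s != spanAllocHeap
}

func main() {
	for _, typ := range []spanAllocType{spanAllocHeap, spanAllocStack, spanAllocPtrScalarBits, spanAllocWorkBuf} {
		fmt.Printf("type=%d manual=%v\n", typ, typ.manual())
	}
}
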
@@ -884,7 +893,7 @@ func (h *mheap) alloc(npages uintptr, spanclass spanClass, needzero bool) *mspan
if h.sweepdone == 0 {
h.reclaim(npages)
}
- s = h.allocSpan(npages, false, spanclass, &memstats.heap_inuse)
+ s = h.allocSpan(npages, spanAllocHeap, spanclass)
})
if s != nil {
@@ -909,9 +918,15 @@ func (h *mheap) alloc(npages uintptr, spanclass spanClass, needzero bool) *mspan
// allocManual must be called on the system stack because it may
// acquire the heap lock via allocSpan. See mheap for details.
//
+// If new code is written to call allocManual, do NOT use an
+// existing spanAllocType value and instead declare a new one.
+//
//go:systemstack
-func (h *mheap) allocManual(npages uintptr, stat *uint64) *mspan {
- return h.allocSpan(npages, true, 0, stat)
+func (h *mheap) allocManual(npages uintptr, typ spanAllocType) *mspan {
+ if !typ.manual() {
+ throw("manual span allocation called with non-manually-managed type")
+ }
+ return h.allocSpan(npages, typ, 0)
}
// setSpans modifies the span map so [spanOf(base), spanOf(base+npage*pageSize))
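
To make the "declare a new one" note above concrete, a hedged standalone illustration. spanAllocDebugLog is invented purely for this sketch and exists neither in this change nor in the runtime; the point is that a hypothetical new allocManual caller appends its own value to the const block rather than reusing spanAllocStack or spanAllocWorkBuf, so its memory gets its own stat bucket.

package main

import "fmt"

type spanAllocType uint8

const (
	spanAllocHeap spanAllocType = iota
	spanAllocStack
	spanAllocPtrScalarBits
	spanAllocWorkBuf
	spanAllocDebugLog // hypothetical new value for a hypothetical new caller; not in the runtime
)

func (s spanAllocType) manual() bool { return s != spanAllocHeap }

func main() {
	// Reusing spanAllocStack for a new consumer would silently inflate
	// inStacks; a dedicated value keeps its memory separately visible.
	// The new value still counts as manually managed, so allocManual's
	// !typ.manual() throw would not trip for it.
	fmt.Println(spanAllocDebugLog.manual()) // true
}
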
@@ -1073,7 +1088,7 @@ func (h *mheap) freeMSpanLocked(s *mspan) {
// allocSpan allocates an mspan which owns npages worth of memory.
//
-// If manual == false, allocSpan allocates a heap span of class spanclass
+// If typ.manual() == false, allocSpan allocates a heap span of class spanclass
// and updates heap accounting. If manual == true, allocSpan allocates a
// manually-managed span (spanclass is ignored), and the caller is
// responsible for any accounting related to its use of the span. Either
@@ -1088,7 +1103,7 @@ func (h *mheap) freeMSpanLocked(s *mspan) {
// the heap lock and because it must block GC transitions.
//
//go:systemstack
-func (h *mheap) allocSpan(npages uintptr, manual bool, spanclass spanClass, sysStat *uint64) (s *mspan) {
+func (h *mheap) allocSpan(npages uintptr, typ spanAllocType, spanclass spanClass) (s *mspan) {
// Function-global state.
gp := getg()
base, scav := uintptr(0), uintptr(0)
@@ -1109,23 +1124,11 @@ func (h *mheap) allocSpan(npages uintptr, manual bool, spanclass spanClass, sysS
base, scav = c.alloc(npages)
if base != 0 {
s = h.tryAllocMSpan()
-
- if s != nil && gcBlackenEnabled == 0 && (manual || spanclass.sizeclass() != 0) {
+ if s != nil {
goto HaveSpan
}
- // We're either running duing GC, failed to acquire a mspan,
- // or the allocation is for a large object. This means we
- // have to lock the heap and do a bunch of extra work,
- // so go down the HaveBaseLocked path.
- //
- // We must do this during GC to avoid skew with heap_scan
- // since we flush mcache stats whenever we lock.
- //
- // TODO(mknyszek): It would be nice to not have to
- // lock the heap if it's a large allocation, but
- // it's fine for now. The critical section here is
- // short and large object allocations are relatively
- // infrequent.
+ // We have a base but no mspan, so we need
+ // to lock the heap.
}
}
@@ -1152,39 +1155,6 @@ func (h *mheap) allocSpan(npages uintptr, manual bool, spanclass spanClass, sysS
// one now that we have the heap lock.
s = h.allocMSpanLocked()
}
- if !manual {
- // This is a heap span, so we should do some additional accounting
- // which may only be done with the heap locked.
-
- // Transfer stats from mcache to global.
- var c *mcache
- if gp.m.p != 0 {
- c = gp.m.p.ptr().mcache
- } else {
- // This case occurs while bootstrapping.
- // See the similar code in mallocgc.
- c = mcache0
- if c == nil {
- throw("mheap.allocSpan called with no P")
- }
- }
- memstats.heap_scan += uint64(c.local_scan)
- c.local_scan = 0
- memstats.tinyallocs += uint64(c.local_tinyallocs)
- c.local_tinyallocs = 0
-
- // Do some additional accounting if it's a large allocation.
- if spanclass.sizeclass() == 0 {
- mheap_.largealloc += uint64(npages * pageSize)
- mheap_.nlargealloc++
- atomic.Xadd64(&memstats.heap_live, int64(npages*pageSize))
- }
-
- // Either heap_live or heap_scan could have been updated.
- if gcBlackenEnabled != 0 {
- gcController.revise()
- }
- }
unlock(&h.lock)
HaveSpan:
@@ -1195,12 +1165,10 @@ HaveSpan:
s.needzero = 1
}
nbytes := npages * pageSize
- if manual {
+ if typ.manual() {
s.manualFreeList = 0
s.nelems = 0
s.limit = s.base() + s.npages*pageSize
- // Manually managed memory doesn't count toward heap_sys.
- mSysStatDec(&memstats.heap_sys, s.npages*pageSize)
s.state.set(mSpanManual)
} else {
// We must set span properties before the span is published anywhere
@@ -1254,11 +1222,32 @@ HaveSpan:
// sysUsed all the pages that are actually available
// in the span since some of them might be scavenged.
sysUsed(unsafe.Pointer(base), nbytes)
- mSysStatDec(&memstats.heap_released, scav)
+ atomic.Xadd64(&memstats.heap_released, -int64(scav))
}
// Update stats.
- mSysStatInc(sysStat, nbytes)
- mSysStatDec(&memstats.heap_idle, nbytes)
+ if typ == spanAllocHeap {
+ atomic.Xadd64(&memstats.heap_inuse, int64(nbytes))
+ }
+ if typ.manual() {
+ // Manually managed memory doesn't count toward heap_sys.
+ memstats.heap_sys.add(-int64(nbytes))
+ }
+ // Update consistent stats.
+ c := getMCache()
+ stats := memstats.heapStats.acquire(c)
+ atomic.Xaddint64(&stats.committed, int64(scav))
+ atomic.Xaddint64(&stats.released, -int64(scav))
+ switch typ {
+ case spanAllocHeap:
+ atomic.Xaddint64(&stats.inHeap, int64(nbytes))
+ case spanAllocStack:
+ atomic.Xaddint64(&stats.inStacks, int64(nbytes))
+ case spanAllocPtrScalarBits:
+ atomic.Xaddint64(&stats.inPtrScalarBits, int64(nbytes))
+ case spanAllocWorkBuf:
+ atomic.Xaddint64(&stats.inWorkBufs, int64(nbytes))
+ }
+ memstats.heapStats.release(c)
// Publish the span in various locations.
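
The allocation-side updates above have a fixed shape: scavenged bytes that sysUsed just recommitted move from released to committed, and the span's size is added to exactly one per-type bucket. A standalone model of that bookkeeping; heapStatsDelta here is a simplified stand-in for the shard returned by memstats.heapStats.acquire, not its real definition.

package main

import "fmt"

type spanAllocType uint8

const (
	spanAllocHeap spanAllocType = iota
	spanAllocStack
	spanAllocPtrScalarBits
	spanAllocWorkBuf
)

// heapStatsDelta models only the fields that allocSpan touches above.
type heapStatsDelta struct {
	committed, released                           int64
	inHeap, inStacks, inPtrScalarBits, inWorkBufs int64
}

// allocSpanStats applies the same updates as the "Update consistent stats"
// block above: scav bytes move from released to committed, and nbytes lands
// in exactly one per-type bucket. freeSpanLocked later applies the same
// switch with -nbytes and no scav movement.
func (d *heapStatsDelta) allocSpanStats(typ spanAllocType, nbytes, scav int64) {
	d.committed += scav
	d.released -= scav
	switch typ {
	case spanAllocHeap:
		d.inHeap += nbytes
	case spanAllocStack:
		d.inStacks += nbytes
	case spanAllocPtrScalarBits:
		d.inPtrScalarBits += nbytes
	case spanAllocWorkBuf:
		d.inWorkBufs += nbytes
	}
}

func main() {
	var d heapStatsDelta
	// A 4-page stack span where 1 page had been scavenged, assuming 8 KiB pages.
	d.allocSpanStats(spanAllocStack, 4*8192, 8192)
	fmt.Printf("%+v\n", d)
}
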
@@ -1269,7 +1258,7 @@ HaveSpan:
// before that happens) or pageInUse is updated.
h.setSpans(s.base(), npages, s)
- if !manual {
+ if !typ.manual() {
// Mark in-use span in arena page bitmap.
//
// This publishes the span to the page sweeper, so
@@ -1280,11 +1269,6 @@ HaveSpan:
// Update related page sweeper stats.
atomic.Xadd64(&h.pagesInUse, int64(npages))
-
- if trace.enabled {
- // Trace that a heap alloc occurred.
- traceHeapAlloc()
- }
}
// Make sure the newly allocated span will be observed
@@ -1340,8 +1324,11 @@ func (h *mheap) grow(npage uintptr) bool {
// The allocation is always aligned to the heap arena
// size which is always > physPageSize, so its safe to
// just add directly to heap_released.
- mSysStatInc(&memstats.heap_released, asize)
- mSysStatInc(&memstats.heap_idle, asize)
+ atomic.Xadd64(&memstats.heap_released, int64(asize))
+ c := getMCache()
+ stats := memstats.heapStats.acquire(c)
+ atomic.Xaddint64(&stats.released, int64(asize))
+ memstats.heapStats.release(c)
// Recalculate nBase.
// We know this won't overflow, because sysAlloc returned
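
A small worked example of how the grow-side update above composes with the allocation path earlier in this diff: memory added by grow is recorded as released, and when allocSpan later reuses scavenged pages, only those bytes move over to committed. The sizes are illustrative, and only the updates visible in this diff are modeled.

package main

import "fmt"

func main() {
	// Other paths that touch these counters (sysMap, the scavenger, ...)
	// are outside this diff and ignored here.
	const pageSize = 8192 // assumed 8 KiB runtime page size
	var committed, released int64

	// grow: asize bytes of newly mapped heap memory are recorded as
	// released (stats.released += asize above). The size is illustrative,
	// not the actual arena granularity.
	asize := int64(4 << 20)
	released += asize

	// allocSpan later hands out a span from that region in which 4 pages
	// had been scavenged; sysUsed recommits them, so the earlier hunk does
	// committed += scav and released -= scav.
	scav := int64(4 * pageSize)
	committed += scav
	released -= scav

	fmt.Println("committed:", committed, "released:", released)
	// committed: 32768 released: 4161536
}
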
@@ -1373,29 +1360,20 @@ func (h *mheap) grow(npage uintptr) bool {
// Free the span back into the heap.
func (h *mheap) freeSpan(s *mspan) {
systemstack(func() {
- c := getg().m.p.ptr().mcache
lock(&h.lock)
- memstats.heap_scan += uint64(c.local_scan)
- c.local_scan = 0
- memstats.tinyallocs += uint64(c.local_tinyallocs)
- c.local_tinyallocs = 0
if msanenabled {
// Tell msan that this entire span is no longer in use.
base := unsafe.Pointer(s.base())
bytes := s.npages << _PageShift
msanfree(base, bytes)
}
- if gcBlackenEnabled != 0 {
- // heap_scan changed.
- gcController.revise()
- }
- h.freeSpanLocked(s, true, true)
+ h.freeSpanLocked(s, spanAllocHeap)
unlock(&h.lock)
})
}
// freeManual frees a manually-managed span returned by allocManual.
-// stat must be the same as the stat passed to the allocManual that
+// typ must be the same as the spanAllocType passed to the allocManual that
// allocated s.
//
// This must only be called when gcphase == _GCoff. See mSpanState for
@@ -1405,16 +1383,14 @@ func (h *mheap) freeSpan(s *mspan) {
// the heap lock. See mheap for details.
//
//go:systemstack
-func (h *mheap) freeManual(s *mspan, stat *uint64) {
+func (h *mheap) freeManual(s *mspan, typ spanAllocType) {
s.needzero = 1
lock(&h.lock)
- mSysStatDec(stat, s.npages*pageSize)
- mSysStatInc(&memstats.heap_sys, s.npages*pageSize)
- h.freeSpanLocked(s, false, true)
+ h.freeSpanLocked(s, typ)
unlock(&h.lock)
}
-func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
+func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
switch s.state.get() {
case mSpanManual:
if s.allocCount != 0 {
@@ -1434,12 +1410,31 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
throw("mheap.freeSpanLocked - invalid span state")
}
- if acctinuse {
- mSysStatDec(&memstats.heap_inuse, s.npages*pageSize)
- }
- if acctidle {
- mSysStatInc(&memstats.heap_idle, s.npages*pageSize)
- }
+ // Update stats.
+ //
+ // Mirrors the code in allocSpan.
+ nbytes := s.npages * pageSize
+ if typ == spanAllocHeap {
+ atomic.Xadd64(&memstats.heap_inuse, -int64(nbytes))
+ }
+ if typ.manual() {
+ // Manually managed memory doesn't count toward heap_sys, so add it back.
+ memstats.heap_sys.add(int64(nbytes))
+ }
+ // Update consistent stats.
+ c := getMCache()
+ stats := memstats.heapStats.acquire(c)
+ switch typ {
+ case spanAllocHeap:
+ atomic.Xaddint64(&stats.inHeap, -int64(nbytes))
+ case spanAllocStack:
+ atomic.Xaddint64(&stats.inStacks, -int64(nbytes))
+ case spanAllocPtrScalarBits:
+ atomic.Xaddint64(&stats.inPtrScalarBits, -int64(nbytes))
+ case spanAllocWorkBuf:
+ atomic.Xaddint64(&stats.inWorkBufs, -int64(nbytes))
+ }
+ memstats.heapStats.release(c)
// Mark the space as free.
h.pages.free(s.base(), s.npages)
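
Because the switch above mirrors the one in allocSpan, allocating and then freeing a span with the same spanAllocType returns every per-type bucket to where it started. A tiny standalone check of that pairing invariant, with plain counters standing in for the real stats shard.

package main

import "fmt"

func main() {
	// Plain counters standing in for the inWorkBufs/inStacks buckets
	// updated above and in allocSpan.
	var inWorkBufs, inStacks int64
	nbytes := int64(2 * 8192) // a 2-page work-buf span, assuming 8 KiB pages

	inWorkBufs += nbytes // allocManual(..., spanAllocWorkBuf) -> allocSpan
	inWorkBufs -= nbytes // freeManual(..., spanAllocWorkBuf) -> freeSpanLocked

	fmt.Println(inWorkBufs, inStacks) // 0 0: matching types cancel exactly
	// Freeing with a mismatched type (say spanAllocStack) would leave
	// inWorkBufs permanently high and drive inStacks negative, which is
	// why freeManual requires the same spanAllocType given to allocManual.
}
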
@@ -1982,7 +1977,7 @@ func newArenaMayUnlock() *gcBitsArena {
var result *gcBitsArena
if gcBitsArenas.free == nil {
unlock(&gcBitsArenas.lock)
- result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gc_sys))
+ result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gcMiscSys))
if result == nil {
throw("runtime: cannot allocate memory")
}