aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/mcache.go
diff options
context:
space:
mode:
authorAustin Clements <austin@google.com>2016-02-09 17:53:07 -0500
committerAustin Clements <austin@google.com>2017-04-28 22:50:31 +0000
commit1a033b1a70668eb8b3832bd06512d0a8d2e59f57 (patch)
tree057cb53dc298374cde8df697ac280ebb3b06025d /src/runtime/mcache.go
parent390fdead0be0087d10e2e4faff7cb0a12b6a3ec8 (diff)
downloadgo-1a033b1a70668eb8b3832bd06512d0a8d2e59f57.tar.gz
go-1a033b1a70668eb8b3832bd06512d0a8d2e59f57.zip
runtime: separate spans of noscan objects
Currently, we mix objects with pointers and objects without pointers ("noscan" objects) together in memory. As a result, for every object we grey, we have to check that object's heap bits to find out if it's noscan, which adds to the per-object cost of GC. This also hurts the TLB footprint of the garbage collector because it decreases the density of scannable objects at the page level. This commit improves the situation by using separate spans for noscan objects. This will allow a much simpler noscan check (in a follow up CL), eliminate the need to clear the bitmap of noscan objects (in a follow up CL), and improves TLB footprint by increasing the density of scannable objects. This is also a step toward eliminating dead bits, since the current noscan check depends on checking the dead bit of the first word. This has no effect on the heap size of the garbage benchmark. We'll measure the performance change of this after the follow-up optimizations. This is a cherry-pick from dev.garbage commit d491e550c3. The only non-trivial merge conflict was in updatememstats in mstats.go, where we now have to separate the per-spanclass stats from the per-sizeclass stats. Change-Id: I13bdc4869538ece5649a8d2a41c6605371618e40 Reviewed-on: https://go-review.googlesource.com/41251 Run-TryBot: Austin Clements <austin@google.com> TryBot-Result: Gobot Gobot <gobot@golang.org> Reviewed-by: Rick Hudson <rlh@golang.org>
Diffstat (limited to 'src/runtime/mcache.go')
-rw-r--r--src/runtime/mcache.go15
1 file changed, 8 insertions, 7 deletions
diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go
index c483310cee..96fb273337 100644
--- a/src/runtime/mcache.go
+++ b/src/runtime/mcache.go
@@ -33,7 +33,8 @@ type mcache struct {
local_tinyallocs uintptr // number of tiny allocs not counted in other stats
// The rest is not accessed on every malloc.
- alloc [_NumSizeClasses]*mspan // spans to allocate from
+
+ alloc [numSpanClasses]*mspan // spans to allocate from, indexed by spanClass
stackcache [_NumStackOrders]stackfreelist
@@ -77,7 +78,7 @@ func allocmcache() *mcache {
lock(&mheap_.lock)
c := (*mcache)(mheap_.cachealloc.alloc())
unlock(&mheap_.lock)
- for i := 0; i < _NumSizeClasses; i++ {
+ for i := range c.alloc {
c.alloc[i] = &emptymspan
}
c.next_sample = nextSample()
@@ -103,12 +104,12 @@ func freemcache(c *mcache) {
// Gets a span that has a free object in it and assigns it
// to be the cached span for the given sizeclass. Returns this span.
-func (c *mcache) refill(sizeclass int32) *mspan {
+func (c *mcache) refill(spc spanClass) *mspan {
_g_ := getg()
_g_.m.locks++
// Return the current cached span to the central lists.
- s := c.alloc[sizeclass]
+ s := c.alloc[spc]
if uintptr(s.allocCount) != s.nelems {
throw("refill of span with free space remaining")
@@ -119,7 +120,7 @@ func (c *mcache) refill(sizeclass int32) *mspan {
}
// Get a new cached span from the central lists.
- s = mheap_.central[sizeclass].mcentral.cacheSpan()
+ s = mheap_.central[spc].mcentral.cacheSpan()
if s == nil {
throw("out of memory")
}
@@ -128,13 +129,13 @@ func (c *mcache) refill(sizeclass int32) *mspan {
throw("span has no free space")
}
- c.alloc[sizeclass] = s
+ c.alloc[spc] = s
_g_.m.locks--
return s
}
func (c *mcache) releaseAll() {
- for i := 0; i < _NumSizeClasses; i++ {
+ for i := range c.alloc {
s := c.alloc[i]
if s != &emptymspan {
mheap_.central[i].mcentral.uncacheSpan(s)