path: root/src/runtime/mgcwork.go
author    Austin Clements <austin@google.com>  2017-03-20 14:05:48 -0400
committer Austin Clements <austin@google.com>  2017-04-13 18:20:44 +0000
commit 9cc883a466fdbeba4371cdcc49b4bfdda0253341 (patch)
tree   2f8b5bc5e19406ba46f44b9df7e7e535fecfcb8a /src/runtime/mgcwork.go
parent 42c121476217595d9eddbded70cfb6500eca8442 (diff)
runtime: allocate GC workbufs from manually-managed spans
Currently the runtime allocates workbufs from persistent memory, which
means they can never be freed. Switch to allocating them from
manually-managed heap spans. This doesn't free them yet, but it puts us
in a position to do so.

For #19325.

Change-Id: I94b2512a2f2bbbb456cd9347761b9412e80d2da9
Reviewed-on: https://go-review.googlesource.com/38581
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
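A minimal, self-contained sketch of the idea being introduced, using hypothetical stand-ins (buf, pool, and the getempty/putempty methods here are illustrations only; the runtime's real workbuf, mheap_.allocManual, and span lists are internal and unreachable from ordinary Go code): carve a large, separately tracked chunk into fixed-size buffers and keep a free list, so each chunk stays identifiable and could eventually be returned to the heap, which persistentalloc memory never can.

package main

import "fmt"

const (
	bufSize      = 2048     // plays the role of _WorkbufSize
	chunkSize    = 32 << 10 // plays the role of workbufAlloc
	bufsPerChunk = chunkSize / bufSize
)

// buf is a stand-in for workbuf: a fixed-size node on a free list.
type buf struct {
	next *buf
	nobj int
	data [bufSize - 16]byte // pad so a buf occupies roughly bufSize bytes
}

// pool is a stand-in for the empty list plus work.wbufSpans: it keeps
// every chunk it has carved up, so the backing memory stays tracked
// and could later be freed as a unit.
type pool struct {
	empty  *buf
	chunks [][]buf
}

// getempty mirrors the new allocation path: when the free list is
// empty, carve one chunk into bufs, return the first, and push the
// rest onto the free list.
func (p *pool) getempty() *buf {
	if b := p.empty; b != nil {
		p.empty = b.next
		b.next, b.nobj = nil, 0
		return b
	}
	chunk := make([]buf, bufsPerChunk)
	p.chunks = append(p.chunks, chunk)
	for i := 1; i < len(chunk); i++ {
		chunk[i].next = p.empty
		p.empty = &chunk[i]
	}
	return &chunk[0]
}

// putempty returns b to the free list.
func (p *pool) putempty(b *buf) {
	b.next = p.empty
	p.empty = b
}

func main() {
	var p pool
	b := p.getempty()
	p.putempty(b)
	fmt.Printf("chunks held: %d, bufs per chunk: %d\n", len(p.chunks), bufsPerChunk)
}

Unlike persistentalloc memory, nothing here is out of the allocator's reach: dropping a chunk from p.chunks would make it collectable, which is the position the commit message says this change puts the runtime in.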
Diffstat (limited to 'src/runtime/mgcwork.go')
-rw-r--r--  src/runtime/mgcwork.go | 38
1 file changed, 37 insertions(+), 1 deletion(-)
diff --git a/src/runtime/mgcwork.go b/src/runtime/mgcwork.go
index 1df40d2afe..a9559230de 100644
--- a/src/runtime/mgcwork.go
+++ b/src/runtime/mgcwork.go
@@ -12,8 +12,22 @@ import (
const (
_WorkbufSize = 2048 // in bytes; larger values result in less contention
+
+ // workbufAlloc is the number of bytes to allocate at a time
+ // for new workbufs. This must be a multiple of pageSize and
+ // should be a multiple of _WorkbufSize.
+ //
+ // Larger values reduce workbuf allocation overhead. Smaller
+ // values reduce heap fragmentation.
+ workbufAlloc = 32 << 10
)
+func init() {
+ if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 {
+ throw("bad workbufAlloc")
+ }
+}
+
// Garbage collector work pool abstraction.
//
// This implements a producer/consumer model for pointers to grey
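The init function added in this hunk fails fast at startup if the constants drift out of step. A standalone sketch of the same invariant check, assuming an illustrative pageSize (the runtime's page size is platform-dependent; 8192 is a common value):

package main

const (
	pageSize     = 8 << 10 // assumed for illustration; platform-dependent in the runtime
	workbufSize  = 2048    // stands in for _WorkbufSize
	workbufAlloc = 32 << 10
)

func init() {
	// A chunk must divide evenly into whole pages and whole workbufs,
	// or the slicing loop in getempty would waste or overrun memory.
	if workbufAlloc%pageSize != 0 || workbufAlloc%workbufSize != 0 {
		panic("bad workbufAlloc") // the runtime uses throw instead
	}
}

func main() {
	println("workbufAlloc invariants hold")
}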
@@ -318,7 +332,29 @@ func getempty() *workbuf {
}
}
if b == nil {
- b = (*workbuf)(persistentalloc(unsafe.Sizeof(*b), sys.CacheLineSize, &memstats.gc_sys))
+ // Allocate more workbufs.
+ var s *mspan
+ systemstack(func() {
+ s = mheap_.allocManual(workbufAlloc/pageSize, &memstats.gc_sys)
+ })
+ if s == nil {
+ throw("out of memory")
+ }
+ // Record the new span in the busy list.
+ lock(&work.wbufSpans.lock)
+ work.wbufSpans.busy.insert(s)
+ unlock(&work.wbufSpans.lock)
+ // Slice up the span into new workbufs. Return one and
+ // put the rest on the empty list.
+ for i := uintptr(0); i+_WorkbufSize <= workbufAlloc; i += _WorkbufSize {
+ newb := (*workbuf)(unsafe.Pointer(s.base() + i))
+ newb.nobj = 0
+ if i == 0 {
+ b = newb
+ } else {
+ putempty(newb)
+ }
+ }
}
return b
}
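The slicing loop walks the span in _WorkbufSize strides: a 32 KiB span yields 16 workbufs, one handed back to the caller (the i == 0 case) and fifteen pushed onto the empty list. A quick check of that arithmetic, with the two constants copied from the diff:

package main

import "fmt"

func main() {
	const workbufSize = 2048      // _WorkbufSize
	const workbufAlloc = 32 << 10 // workbufAlloc

	// Count iterations of the same loop shape used in getempty.
	n := 0
	for i := 0; i+workbufSize <= workbufAlloc; i += workbufSize {
		n++
	}
	fmt.Println(n, workbufAlloc/workbufSize) // prints: 16 16
}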