Diffstat (limited to 'src/runtime/mgcwork.go')
-rw-r--r--  src/runtime/mgcwork.go  78
1 file changed, 2 insertions(+), 76 deletions(-)
diff --git a/src/runtime/mgcwork.go b/src/runtime/mgcwork.go
index 46101657d5..b3a068661e 100644
--- a/src/runtime/mgcwork.go
+++ b/src/runtime/mgcwork.go
@@ -22,13 +22,6 @@ const (
workbufAlloc = 32 << 10
)
-// throwOnGCWork causes any operations that add pointers to a gcWork
-// buffer to throw.
-//
-// TODO(austin): This is a temporary debugging measure for issue
-// #27993. To be removed before release.
-var throwOnGCWork bool
-
func init() {
if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 {
throw("bad workbufAlloc")
@@ -93,17 +86,6 @@ type gcWork struct {
// termination check. Specifically, this indicates that this
// gcWork may have communicated work to another gcWork.
flushedWork bool
-
- // pauseGen causes put operations to spin while pauseGen ==
- // gcWorkPauseGen if debugCachedWork is true.
- pauseGen uint32
-
- // putGen is the pauseGen of the last putGen.
- putGen uint32
-
- // pauseStack is the stack at which this P was paused if
- // debugCachedWork is true.
- pauseStack [16]uintptr
}
// Most of the methods of gcWork are go:nowritebarrierrec because the
@@ -122,60 +104,10 @@ func (w *gcWork) init() {
w.wbuf2 = wbuf2
}
-func (w *gcWork) checkPut(ptr uintptr, ptrs []uintptr) {
- if debugCachedWork {
- alreadyFailed := w.putGen == w.pauseGen
- w.putGen = w.pauseGen
- if !canPreemptM(getg().m) {
- // If we were to spin, the runtime may
- // deadlock. Since we can't be preempted, the
- // spin could prevent gcMarkDone from
- // finishing the ragged barrier, which is what
- // releases us from the spin.
- return
- }
- for atomic.Load(&gcWorkPauseGen) == w.pauseGen {
- }
- if throwOnGCWork {
- printlock()
- if alreadyFailed {
- println("runtime: checkPut already failed at this generation")
- }
- println("runtime: late gcWork put")
- if ptr != 0 {
- gcDumpObject("ptr", ptr, ^uintptr(0))
- }
- for _, ptr := range ptrs {
- gcDumpObject("ptrs", ptr, ^uintptr(0))
- }
- println("runtime: paused at")
- for _, pc := range w.pauseStack {
- if pc == 0 {
- break
- }
- f := findfunc(pc)
- if f.valid() {
- // Obviously this doesn't
- // relate to ancestor
- // tracebacks, but this
- // function prints what we
- // want.
- printAncestorTracebackFuncInfo(f, pc)
- } else {
- println("\tunknown PC ", hex(pc), "\n")
- }
- }
- throw("throwOnGCWork")
- }
- }
-}
-
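The lines deleted above are the checkPut debugging hook for issue #27993: each gcWork records a pause generation, and puts spin while a global generation matches it so the ragged-barrier coordinator can freeze producers and flag late puts. A minimal standalone sketch of that pause-generation technique follows; the names (pauseGen, worker) and the plain print are illustrative, not the runtime's code, which throws only when throwOnGCWork is set.

// sketch_checkput.go: standalone illustration of the pause-generation
// debugging hook deleted above; all names here are illustrative.
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// pauseGen is bumped by a coordinator; while it equals a worker's recorded
// generation, that worker's puts must spin (the "ragged barrier" pause).
var pauseGen atomic.Uint32

type worker struct {
	myPauseGen uint32 // generation at which this worker was asked to pause
}

// checkPut spins while puts are frozen for this worker's generation, then
// reports the put that slipped in late (the runtime threw here when
// throwOnGCWork was set).
func (w *worker) checkPut(obj uintptr) {
	for pauseGen.Load() == w.myPauseGen {
		// spin until the coordinator advances the generation
	}
	fmt.Printf("late put of %#x during generation %d\n", obj, w.myPauseGen)
}

func main() {
	w := &worker{myPauseGen: 1}
	pauseGen.Store(1) // freeze puts for generation 1
	go func() {
		time.Sleep(10 * time.Millisecond)
		pauseGen.Store(2) // advancing the generation releases spinners
	}()
	w.checkPut(0xdeadbeef)
}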
// put enqueues a pointer for the garbage collector to trace.
// obj must point to the beginning of a heap object or an oblet.
//go:nowritebarrierrec
func (w *gcWork) put(obj uintptr) {
- w.checkPut(obj, nil)
-
flushed := false
wbuf := w.wbuf1
// Record that this may acquire the wbufSpans or heap lock to
@@ -214,8 +146,6 @@ func (w *gcWork) put(obj uintptr) {
// otherwise it returns false and the caller needs to call put.
//go:nowritebarrierrec
func (w *gcWork) putFast(obj uintptr) bool {
- w.checkPut(obj, nil)
-
wbuf := w.wbuf1
if wbuf == nil {
return false
@@ -237,8 +167,6 @@ func (w *gcWork) putBatch(obj []uintptr) {
return
}
- w.checkPut(0, obj)
-
flushed := false
wbuf := w.wbuf1
if wbuf == nil {
@@ -360,12 +288,10 @@ func (w *gcWork) balance() {
return
}
if wbuf := w.wbuf2; wbuf.nobj != 0 {
- w.checkPut(0, wbuf.obj[:wbuf.nobj])
putfull(wbuf)
w.flushedWork = true
w.wbuf2 = getempty()
} else if wbuf := w.wbuf1; wbuf.nobj > 4 {
- w.checkPut(0, wbuf.obj[:wbuf.nobj])
w.wbuf1 = handoff(wbuf)
w.flushedWork = true // handoff did putfull
} else {
@@ -445,7 +371,7 @@ func getempty() *workbuf {
}
if s == nil {
systemstack(func() {
- s = mheap_.allocManual(workbufAlloc/pageSize, &memstats.gc_sys)
+ s = mheap_.allocManual(workbufAlloc/pageSize, spanAllocWorkBuf)
})
if s == nil {
throw("out of memory")
@@ -547,7 +473,7 @@ func freeSomeWbufs(preemptible bool) bool {
break
}
work.wbufSpans.free.remove(span)
- mheap_.freeManual(span, &memstats.gc_sys)
+ mheap_.freeManual(span, spanAllocWorkBuf)
}
})
more := !work.wbufSpans.free.isEmpty()
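The last two hunks switch getempty and freeSomeWbufs from passing a statistics pointer (&memstats.gc_sys) to passing an allocation-type value (spanAllocWorkBuf). A minimal standalone sketch of that design follows; only spanAllocWorkBuf comes from the diff, and the surrounding types and counters are illustrative assumptions, not the runtime's.

// sketch_spanalloc.go: standalone illustration of accounting manual span
// allocations by an allocation-type value instead of a caller-supplied
// statistic pointer; names other than spanAllocWorkBuf are illustrative.
package main

import "fmt"

type spanAllocType int

const (
	spanAllocStack spanAllocType = iota
	spanAllocWorkBuf // GC work buffers, as allocated by getempty
)

// memStats stands in for the runtime's statistics; the allocator picks the
// counter from typ, so callers no longer pass a *uint64 to update.
type memStats struct {
	stackSys   uint64
	workBufSys uint64
}

func (m *memStats) allocManual(nbytes uint64, typ spanAllocType) {
	switch typ {
	case spanAllocWorkBuf:
		m.workBufSys += nbytes
	case spanAllocStack:
		m.stackSys += nbytes
	}
}

func (m *memStats) freeManual(nbytes uint64, typ spanAllocType) {
	switch typ {
	case spanAllocWorkBuf:
		m.workBufSys -= nbytes
	case spanAllocStack:
		m.stackSys -= nbytes
	}
}

func main() {
	var m memStats
	m.allocManual(32<<10, spanAllocWorkBuf) // workbufAlloc is 32 KiB above
	m.freeManual(32<<10, spanAllocWorkBuf)
	fmt.Println(m.workBufSys)
}

Centralizing the choice of counter in the allocator keeps every manual-span consumer accounted consistently, instead of each call site naming the statistic it wants updated.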