author    Austin Clements <austin@google.com>    2016-03-29 12:28:24 -0400
committer Andrew Gerrand <adg@golang.org>        2016-04-14 05:24:53 +0000
commit    2644b761935b2974fc2bb76be177621212b408c8 (patch)
tree      14f5b46b0209f245fe63841b39244d0cac45e17a
parent    bfe54b9b0ebfa96fd8eb6c35365050e1111ee7f3 (diff)
runtime: fix pagesInUse accounting
When we grow the heap, we create a temporary "in use" span for the
memory acquired from the OS and then free that span to link it into
the heap. Hence, we (1) increase pagesInUse when we make the temporary
span so that (2) freeing the span will correctly decrease it.

However, currently step (1) increases pagesInUse by the number of
pages requested from the heap, while step (2) decreases it by the
number of pages requested from the OS (the size of the temporary
span). These aren't necessarily the same, since we round up the number
of pages we request from the OS, so steps 1 and 2 don't necessarily
cancel out like they're supposed to. Over time, this can add up and
cause pagesInUse to underflow and wrap around to 2^64. The garbage
collector computes the sweep ratio from this, so if this happens, the
sweep ratio becomes effectively infinite, causing the first allocation
on each P in a sweep cycle to sweep the entire heap. This makes
sweeping effectively STW.

Fix this by increasing pagesInUse in step 1 by the number of pages
requested from the OS, so that the two steps correctly cancel out.

We add a test that checks that the running total matches the actual
state of the heap.

Fixes #15022. For 1.6.x.

Change-Id: Iefd9d6abe37d0d447cbdbdf9941662e4f18eeffc
Reviewed-on: https://go-review.googlesource.com/21280
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Russ Cox <rsc@golang.org>
Reviewed-on: https://go-review.googlesource.com/21456
Reviewed-by: Andrew Gerrand <adg@golang.org>
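To see the drift concretely, here is a minimal, self-contained sketch
of the mismatch. This is not the runtime's code: the chunkPages
rounding granularity is made up for illustration, standing in for the
runtime's minimum OS mapping size.

package main

import "fmt"

// chunkPages is a hypothetical rounding granularity standing in for
// the runtime's minimum OS mapping size; the real values differ.
const chunkPages = 256

func main() {
	var pagesInUse uint64

	npage := uint64(100) // pages requested from the heap
	// Pages actually mapped from the OS, rounded up to a whole chunk.
	spanPages := (npage + chunkPages - 1) / chunkPages * chunkPages

	pagesInUse += npage     // buggy step (1): adds the requested count
	pagesInUse -= spanPages // step (2): frees the whole temporary span

	// The unsigned counter has wrapped past zero.
	fmt.Println(pagesInUse) // 18446744073709551460, i.e. 2^64 - 156
}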
-rw-r--r-- src/runtime/export_test.go | 16
-rw-r--r-- src/runtime/gc_test.go     | 17
-rw-r--r-- src/runtime/mheap.go       |  2
3 files changed, 34 insertions(+), 1 deletion(-)
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 5400c1d14e..438090842f 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -171,3 +171,19 @@ func SetTracebackEnv(level string) {
setTraceback(level)
traceback_env = traceback_cache
}
+
+func CountPagesInUse() (pagesInUse, counted uintptr) {
+ stopTheWorld("CountPagesInUse")
+
+ pagesInUse = uintptr(mheap_.pagesInUse)
+
+ for _, s := range h_allspans {
+ if s.state == mSpanInUse {
+ counted += s.npages
+ }
+ }
+
+ startTheWorld()
+
+ return
+}
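The stopTheWorld/startTheWorld pair is what makes the comparison
meaningful: with all Ps stopped, no span can be created or change
state while the loop walks h_allspans, so the direct count and
mheap_.pagesInUse are read from the same consistent snapshot.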
diff --git a/src/runtime/gc_test.go b/src/runtime/gc_test.go
index c8c96bb4ee..d53d3ee000 100644
--- a/src/runtime/gc_test.go
+++ b/src/runtime/gc_test.go
@@ -473,3 +473,20 @@ func testIfaceEqual(x interface{}) {
a = true
}
}
+
+func TestPageAccounting(t *testing.T) {
+ // Grow the heap in small increments. This used to drop the
+ // pages-in-use count below zero because of a rounding
+ // mismatch (golang.org/issue/15022).
+ const blockSize = 64 << 10
+ blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
+ for i := range blocks {
+ blocks[i] = new([blockSize]byte)
+ }
+
+ // Check that the running page count matches reality.
+ pagesInUse, counted := runtime.CountPagesInUse()
+ if pagesInUse != counted {
+ t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
+ }
+}
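Assuming a built toolchain, the new test can be run on its own with
"go test -run TestPageAccounting runtime". The 64 kB block size is
chosen so each allocation is a large object served directly by the
heap; growing 64 MB in such increments forces many separate grow
calls, each of which could shave a few pages off the running total
under the old accounting.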
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index e8189547f8..a153df0733 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -671,7 +671,7 @@ func (h *mheap) grow(npage uintptr) bool {
}
atomic.Store(&s.sweepgen, h.sweepgen)
s.state = _MSpanInUse
- h.pagesInUse += uint64(npage)
+ h.pagesInUse += uint64(s.npages)
h.freeSpanLocked(s, false, true, 0)
return true
}
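A simplified model of the grow/free pairing, with stand-in types and
the same made-up chunkPages granularity as the sketch above, shows why
the fixed version cancels exactly no matter how the request is
rounded:

package main

import "fmt"

const chunkPages = 256 // hypothetical rounding granularity, as above

type span struct{ npages uint64 }

type heap struct{ pagesInUse uint64 }

// grow models mheap.grow after the fix: the temporary span covers the
// rounded-up mapping, and pagesInUse grows by the span's real size.
func (h *heap) grow(npage uint64) {
	s := span{npages: (npage + chunkPages - 1) / chunkPages * chunkPages}
	h.pagesInUse += s.npages // the fix: s.npages, not npage
	h.free(&s)
}

// free models freeSpanLocked's accounting: it subtracts the span size.
func (h *heap) free(s *span) {
	h.pagesInUse -= s.npages
}

func main() {
	var h heap
	for i := 0; i < 1000; i++ {
		h.grow(100) // repeated odd-sized growth no longer drifts
	}
	fmt.Println(h.pagesInUse) // 0
}

Keying the increment and the decrement to the same quantity, the
span's npages, is the whole fix; the rounding itself is unchanged.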