author    Cherry Mui <cherryyz@google.com>  2022-11-09 10:55:54 -0500
committer Cherry Mui <cherryyz@google.com>  2022-11-25 02:37:05 +0000
commit    3115ed23bdaa3dcc465feba4100da40c2d22f964 (patch)
tree      e63e94c60d7f82fe9f24bce74921055c575354b7
parent    d96e584773bf0577a4da2ffc9543111ddd80627c (diff)
[release-branch.go1.18] runtime: make GC see object as allocated after it is initialized
When the GC is scanning some memory (possibly conservatively) and finds a pointer while, concurrently, another goroutine is allocating an object at that same address, the GC may see the pointer before the object and/or the heap bits are initialized. This may cause the GC to see bad pointers and possibly crash.

To prevent this, we make it so that the scanner can only see the object as allocated after the object and the heap bits are initialized. Currently the allocator uses freeindex to find the next available slot, and that code is coupled with updating the free index to a new slot past it. The scanner also uses the freeindex to determine if an object is allocated. This is somewhat racy. This CL makes the scanner use a different field, which is updated only after the object is initialized (and after a memory barrier).

Updates #54596.
Fixes #56751.

Change-Id: I2a57a226369926e7192c253dd0d21d3faf22297c
Reviewed-on: https://go-review.googlesource.com/c/go/+/449017
Reviewed-by: Austin Clements <austin@google.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Run-TryBot: Cherry Mui <cherryyz@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
(cherry picked from commit febe7b8e2a4dd7cce6ab8d02cf79a5430819cbe5)
Reviewed-on: https://go-review.googlesource.com/c/go/+/453255
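[Editor's note] The publish-after-initialize pattern this CL relies on can be illustrated outside the runtime. The sketch below is not the runtime's code: toySpan, alloc, and allocatedForScan are invented names, and an atomic.Uint64 release-store/acquire-load pair stands in for the runtime's internal publicationBarrier plus plain store. It shows the core idea: the allocator advances its private freeindex and initializes the object first, and only then publishes the new index to the "scanner".

// Minimal hedged sketch of the two-index publication pattern.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

const nelems = 8

type toySpan struct {
	objs             [nelems]uint64 // object payloads (stand-in for heap memory and heap bits)
	freeindex        uint64         // allocator-private: next slot to hand out
	freeIndexForScan atomic.Uint64  // scanner-visible: slots below this are allocated
}

// alloc picks a slot with the allocator-private index, initializes the
// object, and only then publishes the slot to the scanner.
func (s *toySpan) alloc(val uint64) {
	i := s.freeindex
	s.freeindex++ // advancing this does NOT yet make slot i visible to the scanner

	s.objs[i] = val // initialize object contents (and, in the runtime, heap bits)

	// Release-store: every write above happens-before any scanner that
	// observes the new index. This is the moment the object becomes
	// "allocated" from the GC's point of view.
	s.freeIndexForScan.Store(i + 1)
}

// allocatedForScan is the scanner's view: a slot counts as allocated
// only once its index is below the published freeIndexForScan.
func (s *toySpan) allocatedForScan(index int) bool {
	return uint64(index) < s.freeIndexForScan.Load()
}

func main() {
	var s toySpan
	var wg sync.WaitGroup
	wg.Add(1)
	go func() { // plays the scanner: only trusts published slots
		defer wg.Done()
		for i := 0; i < nelems; i++ {
			for !s.allocatedForScan(i) {
				// spin until the allocator publishes slot i
			}
			// Safe: the acquire-load above ordered this read after the
			// allocator's initialization of objs[i].
			fmt.Printf("slot %d visible, value %d\n", i, s.objs[i])
		}
	}()
	for i := 0; i < nelems; i++ {
		s.alloc(uint64(100 + i)) // plays the allocator
	}
	wg.Wait()
}

The ordering mirrors mallocgc below: initialize, barrier, then store to freeIndexForScan. Swapping the last two steps would reintroduce the race the commit message describes.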
-rw-r--r--  src/runtime/malloc.go    10
-rw-r--r--  src/runtime/mbitmap.go    2
-rw-r--r--  src/runtime/mgcsweep.go   1
-rw-r--r--  src/runtime/mheap.go     10
4 files changed, 22 insertions, 1 deletion
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 6ed6ceade2d..d738644c7ec 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -1135,6 +1135,16 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// the garbage collector could follow a pointer to x,
// but see uninitialized memory or stale heap bits.
publicationBarrier()
+ // As x and the heap bits are initialized, update
+ // freeIndexForScan now so x is seen by the GC
+ // (including conservative scan) as an allocated object.
+ // While this pointer can't escape into user code as a
+ // _live_ pointer until we return, conservative scanning
+ // may find a dead pointer that happens to point into this
+ // object. Delaying this update until now ensures that
+ // conservative scanning considers this pointer dead until
+ // this point.
+ span.freeIndexForScan = span.freeindex
// Allocate black during GC.
// All slots hold nil so no scanning is needed.
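[Editor's note] The comment added above makes a point about conservative scanning that a small invented sketch can make concrete: a stale word that merely looks like a pointer into the span maps to a slot index, and until that index falls below freeIndexForScan the scanner treats it as dead. The toySpan type, its field layout, elemSize, and the consider helper are all hypothetical; only the freeIndexForScan comparison mirrors the real check.

// Hedged sketch: how a conservative scanner decides whether a
// pointer-looking word refers to an allocated object.
package main

import "fmt"

const elemSize = 16 // assumed object size for this toy span

type toySpan struct {
	base             uintptr // start address of the span
	freeIndexForScan uintptr // slots below this index have been published
}

// objIndex maps a (possibly stale) pointer-looking word to the slot it
// would fall in.
func (s *toySpan) objIndex(p uintptr) uintptr {
	return (p - s.base) / elemSize
}

// consider mimics the conservative scanner's decision for word p.
func (s *toySpan) consider(p uintptr) string {
	if s.objIndex(p) < s.freeIndexForScan {
		return "published: mark as live"
	}
	return "not yet published: treat as dead"
}

func main() {
	s := &toySpan{base: 0x1000, freeIndexForScan: 2}
	fmt.Println(s.consider(0x1010)) // slot 1: already published, marked live
	fmt.Println(s.consider(0x1030)) // slot 3: possibly mid-initialization, ignored
}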
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 937968807b0..95d88d8c610 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -220,7 +220,7 @@ func (s *mspan) nextFreeIndex() uintptr {
// been no preemption points since ensuring this (which could allow a
// GC transition, which would allow the state to change).
func (s *mspan) isFree(index uintptr) bool {
- if index < s.freeindex {
+ if index < s.freeIndexForScan {
return false
}
bytep, mask := s.allocBits.bitp(index)
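[Editor's note] Paraphrasing the changed check with invented stand-ins for mspan's fields: slots below the published index count as allocated this cycle; beyond it, the scanner falls back to the allocation bitmap recorded by the last sweep. A hedged sketch, not the runtime's mspan:

// Hedged sketch of the scanner-side isFree test.
package main

import "fmt"

type toySpan struct {
	freeIndexForScan uintptr
	allocBits        []uint8 // bit set = allocated in an earlier cycle
}

func (s *toySpan) isFree(index uintptr) bool {
	if index < s.freeIndexForScan {
		return false // published as allocated during this cycle
	}
	// Otherwise consult the bitmap left by the last sweep.
	return s.allocBits[index/8]&(1<<(index%8)) == 0
}

func main() {
	s := &toySpan{freeIndexForScan: 2, allocBits: []uint8{0b00001000}}
	fmt.Println(s.isFree(1)) // false: below the published index
	fmt.Println(s.isFree(3)) // false: alloc bit set by a previous cycle
	fmt.Println(s.isFree(4)) // true: genuinely free
}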
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index 0d58f8e0b50..2aa670e1b86 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -623,6 +623,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
s.allocCount = nalloc
s.freeindex = 0 // reset allocation index to start of span.
+ s.freeIndexForScan = 0
if trace.enabled {
getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
}
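[Editor's note] Sweep rebuilds allocBits and restarts allocation from slot 0, so the two indices must be reset as a pair: clearing only freeindex would leave the scanner trusting a stale freeIndexForScan and treating freed slots as still allocated. A minimal hedged sketch of that pairing (toySpan and resetAfterSweep are invented names):

package main

type toySpan struct {
	freeindex        uintptr
	freeIndexForScan uintptr
}

// resetAfterSweep mirrors the paired resets added to sweep above.
func (s *toySpan) resetAfterSweep() {
	s.freeindex = 0        // allocator restarts its search at slot 0
	s.freeIndexForScan = 0 // scanner must not trust stale publications
}

func main() {
	s := &toySpan{freeindex: 5, freeIndexForScan: 5}
	s.resetAfterSweep()
}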
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index ecbd0a3a492..134387562e1 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -459,6 +459,14 @@ type mspan struct {
limit uintptr // end of data in span
speciallock mutex // guards specials list
specials *special // linked list of special records sorted by offset.
+
+ // freeIndexForScan is like freeindex, except that freeindex is
+ // used by the allocator whereas freeIndexForScan is used by the
+ // GC scanner. They are two fields so that the GC sees the object
+ // is allocated only when the object and the heap bits are
+ // initialized (see also the assignment of freeIndexForScan in
+ // mallocgc, and issue 54596).
+ freeIndexForScan uintptr
}
func (s *mspan) base() uintptr {
@@ -1250,6 +1258,7 @@ HaveSpan:
// Initialize mark and allocation structures.
s.freeindex = 0
+ s.freeIndexForScan = 0
s.allocCache = ^uint64(0) // all 1s indicating all free.
s.gcmarkBits = newMarkBits(s.nelems)
s.allocBits = newAllocBits(s.nelems)
@@ -1565,6 +1574,7 @@ func (span *mspan) init(base uintptr, npages uintptr) {
span.specials = nil
span.needzero = 0
span.freeindex = 0
+ span.freeIndexForScan = 0
span.allocBits = nil
span.gcmarkBits = nil
span.state.set(mSpanDead)