-rw-r--r--  src/runtime/malloc.go    10
-rw-r--r--  src/runtime/mbitmap.go    2
-rw-r--r--  src/runtime/mgcsweep.go   1
-rw-r--r--  src/runtime/mheap.go     10
4 files changed, 22 insertions, 1 deletion
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 6ed6ceade2..d738644c7e 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -1135,6 +1135,16 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// the garbage collector could follow a pointer to x,
// but see uninitialized memory or stale heap bits.
publicationBarrier()
+ // As x and the heap bits are initialized, update
+ // freeIndexForScan now so x is seen by the GC
+ // (including conservative scan) as an allocated object.
+ // While this pointer can't escape into user code as a
+ // _live_ pointer until we return, conservative scanning
+ // may find a dead pointer that happens to point into this
+ // object. Delaying this update until now ensures that
+ // conservative scanning considers this pointer dead until
+ // this point.
+ span.freeIndexForScan = span.freeindex
// Allocate black during GC.
// All slots hold nil so no scanning is needed.
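
The ordering enforced by this hunk can be shown outside the runtime. Below is a minimal Go sketch, not the real mallocgc: miniSpan, its slots array, and the atomic store standing in for publicationBarrier are all illustrative assumptions. The point is that the object is fully initialized before the scan watermark is published, so a scanner never observes a half-initialized slot as allocated.

package main

import "sync/atomic"

// miniSpan is a toy stand-in for mspan: a fixed pool of slots plus
// the two indices this CL separates.
type miniSpan struct {
	slots            [8]uint64
	freeindex        uint64 // next slot the allocator hands out
	freeIndexForScan uint64 // watermark the scanner trusts
}

// alloc mirrors the ordering in mallocgc: pick a slot, initialize it,
// then publish. The atomic store here plays the role of
// publicationBarrier, ordering the initialization before the
// watermark update.
func (s *miniSpan) alloc(val uint64) int {
	i := s.freeindex
	s.freeindex++
	s.slots[i] = val // initialize the object (and, in the runtime, its heap bits)
	atomic.StoreUint64(&s.freeIndexForScan, s.freeindex)
	return int(i)
}

func main() {
	var s miniSpan
	i := s.alloc(42)
	println(i) // 0: slot 0 is now published as allocated
}

In the runtime itself the final store is a plain write; publicationBarrier on the preceding line supplies the ordering guarantee the atomic store models here.
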
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 937968807b..95d88d8c61 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -220,7 +220,7 @@ func (s *mspan) nextFreeIndex() uintptr {
// been no preemption points since ensuring this (which could allow a
// GC transition, which would allow the state to change).
func (s *mspan) isFree(index uintptr) bool {
- if index < s.freeindex {
+ if index < s.freeIndexForScan {
return false
}
bytep, mask := s.allocBits.bitp(index)
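
With the new field in place, the fast path of isFree consults the watermark rather than freeindex. A hedged sketch of the resulting check, with mspan reduced to the two fields the check reads and allocBits modeled as a plain byte slice (the real runtime uses *gcBits and its bitp helper):

package main

// spanSketch keeps only what isFree reads. Bit i of allocBits set
// means slot i was allocated as of the last sweep.
type spanSketch struct {
	freeIndexForScan uintptr
	allocBits        []uint8
}

func (s *spanSketch) isFree(index uintptr) bool {
	if index < s.freeIndexForScan {
		// Below the watermark published by mallocgc: the object and
		// its heap bits are initialized, so the slot is allocated.
		return false
	}
	// Above the watermark, fall back to the allocation bitmap.
	bytep := s.allocBits[index/8]
	mask := uint8(1) << (index % 8)
	return bytep&mask == 0
}

func main() {
	s := &spanSketch{freeIndexForScan: 2, allocBits: []uint8{1 << 2}}
	println(s.isFree(1)) // false: below the watermark
	println(s.isFree(2)) // false: allocBits records it as allocated
	println(s.isFree(3)) // true: not yet allocated
}
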
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index 0d58f8e0b5..2aa670e1b8 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -623,6 +623,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
s.allocCount = nalloc
s.freeindex = 0 // reset allocation index to start of span.
+ s.freeIndexForScan = 0
if trace.enabled {
getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
}
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index ecbd0a3a49..134387562e 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -459,6 +459,14 @@ type mspan struct {
limit uintptr // end of data in span
speciallock mutex // guards specials list
specials *special // linked list of special records sorted by offset.
+
+ // freeIndexForScan is like freeindex, except that freeindex is
+ // used by the allocator whereas freeIndexForScan is used by the
+ // GC scanner. They are two fields so that the GC sees the object
+ // is allocated only when the object and the heap bits are
+ // initialized (see also the assignment of freeIndexForScan in
+ // mallocgc, and issue 54596).
+ freeIndexForScan uintptr
}
func (s *mspan) base() uintptr {
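
The comment above is the heart of the change: with a single field, bumping freeindex would make a conservative scanner treat a still-uninitialized slot as allocated. A toy model of the window the two fields create, with hypothetical names; initialize stands for zeroing the object and writing its heap bits:

package main

// toySpan models the invariant: freeIndexForScan trails freeindex,
// and the gap between them is exactly the not-yet-published slot.
type toySpan struct {
	freeindex        uintptr
	freeIndexForScan uintptr
}

// alloc models mallocgc. While initialize runs, freeindex is ahead of
// freeIndexForScan, so a conservative scanner running concurrently
// still reports the new slot as free and will not mark uninitialized
// memory.
func (s *toySpan) alloc(initialize func()) {
	s.freeindex++
	initialize()
	s.freeIndexForScan = s.freeindex // publish: GC may now see the slot as allocated
}

func main() {
	var s toySpan
	s.alloc(func() { /* zero memory, write heap bits */ })
	println(s.freeindex == s.freeIndexForScan) // true once published
}
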
@@ -1250,6 +1258,7 @@ HaveSpan:
// Initialize mark and allocation structures.
s.freeindex = 0
+ s.freeIndexForScan = 0
s.allocCache = ^uint64(0) // all 1s indicating all free.
s.gcmarkBits = newMarkBits(s.nelems)
s.allocBits = newAllocBits(s.nelems)
@@ -1565,6 +1574,7 @@ func (span *mspan) init(base uintptr, npages uintptr) {
span.specials = nil
span.needzero = 0
span.freeindex = 0
+ span.freeIndexForScan = 0
span.allocBits = nil
span.gcmarkBits = nil
span.state.set(mSpanDead)