Skip to content

Commit febe7b8

Browse files
committed
runtime: make GC see object as allocated after it is initialized
When the GC is scanning some memory (possibly conservatively), finding a pointer, while concurrently another goroutine is allocating an object at the same address as the found pointer, the GC may see the pointer before the object and/or the heap bits are initialized. This may cause the GC to see bad pointers and possibly crash. To prevent this, we make it so that the scanner can only see the object as allocated after the object and the heap bits are initialized. Currently the allocator uses freeindex to find the next available slot, and that code is coupled with updating the free index to a new slot past it. The scanner also uses the freeindex to determine if an object is allocated. This is somewhat racy. This CL makes the scanner use a different field, which is only updated after the object initialization (and a memory barrier). Fixes #54596. Change-Id: I2a57a226369926e7192c253dd0d21d3faf22297c Reviewed-on: https://go-review.googlesource.com/c/go/+/449017 Reviewed-by: Austin Clements <[email protected]> Reviewed-by: Michael Knyszek <[email protected]> Run-TryBot: Cherry Mui <[email protected]> TryBot-Result: Gopher Robot <[email protected]>
1 parent d52883f commit febe7b8

File tree

5 files changed

+24
-1
lines changed

5 files changed

+24
-1
lines changed

Diff for: src/runtime/arena.go

+2
Original file line numberDiff line numberDiff line change
@@ -995,6 +995,8 @@ func (h *mheap) allocUserArenaChunk() *mspan {
995995
memclrNoHeapPointers(unsafe.Pointer(s.base()), s.elemsize)
996996
s.needzero = 0
997997

998+
s.freeIndexForScan = 1
999+
9981000
// Set up the range for allocation.
9991001
s.userArenaChunkFree = makeAddrRange(base, s.limit)
10001002
return s

Diff for: src/runtime/malloc.go

+10
Original file line numberDiff line numberDiff line change
@@ -1092,6 +1092,16 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
10921092
// the garbage collector could follow a pointer to x,
10931093
// but see uninitialized memory or stale heap bits.
10941094
publicationBarrier()
1095+
// As x and the heap bits are initialized, update
1096+
// freeIndexForScan now so x is seen by the GC
1097+
// (including conservative scan) as an allocated object.
1098+
// While this pointer can't escape into user code as a
1099+
// _live_ pointer until we return, conservative scanning
1100+
// may find a dead pointer that happens to point into this
1101+
// object. Delaying this update until now ensures that
1102+
// conservative scanning considers this pointer dead until
1103+
// this point.
1104+
span.freeIndexForScan = span.freeindex
10951105

10961106
// Allocate black during GC.
10971107
// All slots hold nil so no scanning is needed.

Diff for: src/runtime/mbitmap.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -191,7 +191,7 @@ func (s *mspan) nextFreeIndex() uintptr {
191191
// been no preemption points since ensuring this (which could allow a
192192
// GC transition, which would allow the state to change).
193193
func (s *mspan) isFree(index uintptr) bool {
194-
if index < s.freeindex {
194+
if index < s.freeIndexForScan {
195195
return false
196196
}
197197
bytep, mask := s.allocBits.bitp(index)

Diff for: src/runtime/mgcsweep.go

+1
Original file line numberDiff line numberDiff line change
@@ -648,6 +648,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
648648

649649
s.allocCount = nalloc
650650
s.freeindex = 0 // reset allocation index to start of span.
651+
s.freeIndexForScan = 0
651652
if trace.enabled {
652653
getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
653654
}

Diff for: src/runtime/mheap.go

+10
Original file line numberDiff line numberDiff line change
@@ -487,6 +487,14 @@ type mspan struct {
487487
speciallock mutex // guards specials list
488488
specials *special // linked list of special records sorted by offset.
489489
userArenaChunkFree addrRange // interval for managing chunk allocation
490+
491+
// freeIndexForScan is like freeindex, except that freeindex is
492+
// used by the allocator whereas freeIndexForScan is used by the
493+
// GC scanner. They are two fields so that the GC sees the object
494+
// is allocated only when the object and the heap bits are
495+
// initialized (see also the assignment of freeIndexForScan in
496+
// mallocgc, and issue 54596).
497+
freeIndexForScan uintptr
490498
}
491499

492500
func (s *mspan) base() uintptr {
@@ -1386,6 +1394,7 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base,
13861394

13871395
// Initialize mark and allocation structures.
13881396
s.freeindex = 0
1397+
s.freeIndexForScan = 0
13891398
s.allocCache = ^uint64(0) // all 1s indicating all free.
13901399
s.gcmarkBits = newMarkBits(s.nelems)
13911400
s.allocBits = newAllocBits(s.nelems)
@@ -1657,6 +1666,7 @@ func (span *mspan) init(base uintptr, npages uintptr) {
16571666
span.specials = nil
16581667
span.needzero = 0
16591668
span.freeindex = 0
1669+
span.freeIndexForScan = 0
16601670
span.allocBits = nil
16611671
span.gcmarkBits = nil
16621672
span.state.set(mSpanDead)

0 commit comments

Comments
 (0)