Skip to content

Commit 1b736b3

Browse files
committed
runtime: consolidate "is sweep done" conditions

The runtime currently has two different notions of sweep completion:

1. All spans are either swept or have begun sweeping.
2. The sweeper has *finished* sweeping all spans.

Having both is confusing (it doesn't help that the documentation is
often unclear or wrong). Condition 2 is stronger and the theoretical
slight optimization that condition 1 could impact is never actually
useful. Hence, this CL consolidates both conditions down to condition 2.

Updates #45315.

Change-Id: I55c84d767d74eb31a004a5619eaba2e351162332
Reviewed-on: https://go-review.googlesource.com/c/go/+/307916
Trust: Austin Clements <[email protected]>
Run-TryBot: Austin Clements <[email protected]>
TryBot-Result: Go Bot <[email protected]>
Reviewed-by: Michael Knyszek <[email protected]>
1 parent a25a77a commit 1b736b3

File tree

3 files changed

+18
-14
lines changed

3 files changed

+18
-14
lines changed

src/runtime/mgc.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -175,7 +175,7 @@ func gcinit() {
175175
}
176176

177177
// No sweep on the first cycle.
178-
mheap_.sweepdone = 1
178+
mheap_.sweepDrained = 1
179179

180180
// Set a reasonable initial GC trigger.
181181
memstats.triggerRatio = 7 / 8.0
@@ -1187,7 +1187,7 @@ func GC() {
11871187
// First, wait for sweeping to finish. (We know there are no
11881188
// more spans on the sweep queue, but we may be concurrently
11891189
// sweeping spans, so we have to wait.)
1190-
for atomic.Load(&work.cycles) == n+1 && atomic.Load(&mheap_.sweepers) != 0 {
1190+
for atomic.Load(&work.cycles) == n+1 && !isSweepDone() {
11911191
Gosched()
11921192
}
11931193

@@ -2192,7 +2192,7 @@ func gcSweep(mode gcMode) {
21922192

21932193
lock(&mheap_.lock)
21942194
mheap_.sweepgen += 2
2195-
mheap_.sweepdone = 0
2195+
mheap_.sweepDrained = 0
21962196
mheap_.pagesSwept = 0
21972197
mheap_.sweepArenas = mheap_.allArenas
21982198
mheap_.reclaimIndex = 0

src/runtime/mgcsweep.go

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -238,7 +238,7 @@ func (l *sweepLocker) dispose() {
238238
// Decrement the number of active sweepers and if this is the
239239
// last one, mark sweep as complete.
240240
l.blocking = false
241-
if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepdone) != 0 {
241+
if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepDrained) != 0 {
242242
l.sweepIsDone()
243243
}
244244
}
@@ -257,7 +257,7 @@ func sweepone() uintptr {
257257
// increment locks to ensure that the goroutine is not preempted
258258
// in the middle of sweep thus leaving the span in an inconsistent state for next GC
259259
_g_.m.locks++
260-
if atomic.Load(&mheap_.sweepdone) != 0 {
260+
if atomic.Load(&mheap_.sweepDrained) != 0 {
261261
_g_.m.locks--
262262
return ^uintptr(0)
263263
}
@@ -271,7 +271,7 @@ func sweepone() uintptr {
271271
for {
272272
s := mheap_.nextSpanForSweep()
273273
if s == nil {
274-
noMoreWork = atomic.Cas(&mheap_.sweepdone, 0, 1)
274+
noMoreWork = atomic.Cas(&mheap_.sweepDrained, 0, 1)
275275
break
276276
}
277277
if state := s.state.get(); state != mSpanInUse {
@@ -335,14 +335,17 @@ func sweepone() uintptr {
335335
return npages
336336
}
337337

338-
// isSweepDone reports whether all spans are swept or currently being swept.
338+
// isSweepDone reports whether all spans are swept.
339339
//
340340
// Note that this condition may transition from false to true at any
341341
// time as the sweeper runs. It may transition from true to false if a
342342
// GC runs; to prevent that the caller must be non-preemptible or must
343343
// somehow block GC progress.
344344
func isSweepDone() bool {
345-
return mheap_.sweepdone != 0
345+
// Check that all spans have at least begun sweeping and there
346+
// are no active sweepers. If both are true, then all spans
347+
// have finished sweeping.
348+
return atomic.Load(&mheap_.sweepDrained) != 0 && atomic.Load(&mheap_.sweepers) == 0
346349
}
347350

348351
// Returns only when span s has been swept.

src/runtime/mheap.go

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -62,11 +62,12 @@ const (
6262
type mheap struct {
6363
// lock must only be acquired on the system stack, otherwise a g
6464
// could self-deadlock if its stack grows with the lock held.
65-
lock mutex
66-
pages pageAlloc // page allocation data structure
67-
sweepgen uint32 // sweep generation, see comment in mspan; written during STW
68-
sweepdone uint32 // all spans are swept
69-
sweepers uint32 // number of active sweepone calls
65+
lock mutex
66+
pages pageAlloc // page allocation data structure
67+
68+
sweepgen uint32 // sweep generation, see comment in mspan; written during STW
69+
sweepDrained uint32 // all spans are swept or are being swept
70+
sweepers uint32 // number of active sweepone calls
7071

7172
// allspans is a slice of all mspans ever created. Each mspan
7273
// appears exactly once.
@@ -904,7 +905,7 @@ func (h *mheap) alloc(npages uintptr, spanclass spanClass, needzero bool) *mspan
904905
systemstack(func() {
905906
// To prevent excessive heap growth, before allocating n pages
906907
// we need to sweep and reclaim at least n pages.
907-
if h.sweepdone == 0 {
908+
if !isSweepDone() {
908909
h.reclaim(npages)
909910
}
910911
s = h.allocSpan(npages, spanAllocHeap, spanclass)

0 commit comments

Comments (0)