Skip to content

Commit 9397fdd

Browse files
committed
sync: yield to the waiter when unlocking a starving mutex
When we have already assigned the semaphore ticket to a specific waiter, we want to get the waiter running as fast as possible since no other G waiting on the semaphore can acquire it optimistically. The net effect is that, when a sync.Mutex is contended, the code in the critical section guarded by the Mutex gets a priority boost. Fixes golang#33747 Change-Id: I9967f0f763c25504010651bdd7f944ee0189cd45
1 parent 3322f3e commit 9397fdd

File tree

4 files changed

+63
-1
lines changed

4 files changed

+63
-1
lines changed

src/runtime/export_test.go

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -718,3 +718,11 @@ func RunGetgThreadSwitchTest() {
718718
panic("g1 != g3")
719719
}
720720
}
721+
722+
var Semacquire = semacquire
723+
var Semrelease1 = semrelease1
724+
725+
func SemNwait(addr *uint32) uint32 {
726+
root := semroot(addr)
727+
return atomic.Load(&root.nwait)
728+
}

src/runtime/sema.go

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -192,6 +192,15 @@ func semrelease1(addr *uint32, handoff bool, skipframes int) {
192192
s.ticket = 1
193193
}
194194
readyWithTime(s, 5+skipframes)
195+
if s.ticket == 1 {
196+
// Direct G handoff
197+
// readyWithTime has added the waiter G as runnext in the current P; we now call
198+
// the scheduler so that we start running the waiter G immediately. Note that
199+
// waiter inherits our time slice: this is desirable to avoid having a highly
200+
// contended semaphore hog the P indefinitely.
201+
// See https://github.com/golang/go/issues/33747 for discussion.
202+
Gosched()
203+
}
195204
}
196205
}
197206

src/runtime/sema_test.go

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
package runtime_test
2+
3+
import (
4+
. "runtime"
5+
"sync/atomic"
6+
"testing"
7+
)
8+
9+
// TestSemaHandoff checks that when semrelease+handoff is
10+
// requested, the G that releases the semaphore yields its
11+
// P directly to the first waiter in line.
12+
// See https://github.com/golang/go/issues/33747 for discussion.
13+
func TestSemaHandoff(t *testing.T) {
14+
const iter = 10000
15+
fail := 0
16+
for i := 0; i < iter; i++ {
17+
res := testSemaHandoff()
18+
if res != 1 {
19+
fail++
20+
}
21+
}
22+
// As long as >= 95% of handoffs are direct, we
23+
// consider the test successful.
24+
if fail > iter*5/100 {
25+
t.Fatal("direct handoff failing:", fail)
26+
}
27+
}
28+
29+
func testSemaHandoff() (res uint32) {
30+
sema := uint32(1)
31+
32+
Semacquire(&sema)
33+
go func() {
34+
Semacquire(&sema)
35+
atomic.CompareAndSwapUint32(&res, 0, 1)
36+
}()
37+
for SemNwait(&sema) == 0 {
38+
Gosched()
39+
}
40+
Semrelease1(&sema, true, 0)
41+
atomic.CompareAndSwapUint32(&res, 0, 2)
42+
43+
return
44+
}

src/sync/mutex.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -216,7 +216,8 @@ func (m *Mutex) unlockSlow(new int32) {
216216
old = m.state
217217
}
218218
} else {
219-
// Starving mode: handoff mutex ownership to the next waiter.
219+
// Starving mode: handoff mutex ownership to the next waiter, and yield our time slice
220+
// so that the next waiter can start to run immediately.
220221
// Note: mutexLocked is not set, the waiter will set it after wakeup.
221222
// But mutex is still considered locked if mutexStarving is set,
222223
// so new coming goroutines won't acquire it.

0 commit comments

Comments
 (0)