@@ -32,40 +32,49 @@
 #include "runtime/javaThread.hpp"
 #include "runtime/os.inline.hpp"
 
-// These are inline variants of Thread::SpinAcquire with optional blocking in VM.
-
-class ShenandoahNoBlockOp : public StackObj {
-public:
-  ShenandoahNoBlockOp(JavaThread* java_thread) {
-    assert(java_thread == nullptr, "Should not pass anything");
-  }
-};
-
 void ShenandoahLock::contended_lock(bool allow_block_for_safepoint) {
   Thread* thread = Thread::current();
   if (allow_block_for_safepoint && thread->is_Java_thread()) {
-    contended_lock_internal<ThreadBlockInVM>(JavaThread::cast(thread));
+    contended_lock_internal<true>(JavaThread::cast(thread));
   } else {
-    contended_lock_internal<ShenandoahNoBlockOp>(nullptr);
+    contended_lock_internal<false>(nullptr);
   }
 }
 
-template<typename BlockOp>
+template<bool ALLOW_BLOCK>
 void ShenandoahLock::contended_lock_internal(JavaThread* java_thread) {
-  int ctr = 0;
-  int yields = 0;
+  assert(!ALLOW_BLOCK || java_thread != nullptr, "Must have a Java thread when allowing block.");
+  // Spin this much on multi-processor, do not spin on uni-processor.
+  int ctr = os::is_MP() ? 0xFF : 0;
+  // Apply TTAS to avoid more expensive CAS calls if the lock is still held by another thread.
   while (Atomic::load(&_state) == locked ||
          Atomic::cmpxchg(&_state, unlocked, locked) != unlocked) {
-    if ((++ctr & 0xFFF) == 0) {
-      BlockOp block(java_thread);
-      if (yields > 5) {
-        os::naked_short_sleep(1);
+    if (ctr > 0 && !SafepointSynchronize::is_synchronizing()) {
+      // Lightly contended, spin a little if no safepoint is pending.
+      SpinPause();
+      ctr--;
+    } else if (ALLOW_BLOCK) {
+      ThreadBlockInVM block(java_thread);
+      if (SafepointSynchronize::is_synchronizing()) {
+        // If a safepoint is pending, we want to block and allow the safepoint to proceed.
+        // Normally, TBIVM above would block us in its destructor.
+        //
+        // But that blocking only happens when TBIVM knows the thread poll is armed.
+        // There is a window between announcing a safepoint and arming the thread poll
+        // during which trying to continuously enter TBIVM is counter-productive.
+        // Under high contention, we may end up going in circles thousands of times.
+        // To avoid that, we wait here until the local poll is armed and then proceed
+        // to TBIVM exit for blocking. We do not SpinPause, but yield to let the
+        // VM thread arm the poll sooner.
+        while (SafepointSynchronize::is_synchronizing() &&
+               !SafepointMechanism::local_poll_armed(java_thread)) {
+          os::naked_yield();
+        }
       } else {
         os::naked_yield();
-        yields++;
       }
     } else {
-      SpinPause();
+      os::naked_yield();
     }
   }
 }
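
The patch replaces the BlockOp policy classes (ThreadBlockInVM vs. the removed ShenandoahNoBlockOp) with a bool template parameter, so each instantiation compiles only its own path. A minimal standalone sketch of that idiom, with hypothetical names rather than HotSpot's types:

#include <cassert>
#include <cstdio>

// Hypothetical stand-in for the two contended_lock_internal instantiations.
template<bool ALLOW_BLOCK>
void lock_slow_path(void* thread) {
  // ALLOW_BLOCK is a compile-time constant: the compiler folds the branch
  // and drops the dead arm, so the <false> instantiation carries no
  // blocking machinery at all.
  if (ALLOW_BLOCK) {
    assert(thread != nullptr && "blocking path needs a current thread");
    std::puts("blocking path: may enter a blocked state");
  } else {
    std::puts("non-blocking path: only spins and yields");
  }
}

int main() {
  int thread_stub; // placeholder for a real thread handle
  lock_slow_path<true>(&thread_stub);
  lock_slow_path<false>(nullptr);
  return 0;
}

Compared to the old policy-class trick, the bool parameter also avoids constructing a dummy StackObj (the empty ShenandoahNoBlockOp) on every pass through the slow branch.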
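The new loop is a test-and-test-and-set (TTAS) lock: it re-reads _state with a plain load and only attempts the CAS once the lock looks free, which keeps waiters reading a shared cache line instead of hammering it with writes. A standalone sketch of the same pattern, assuming std::atomic in place of HotSpot's Atomic and std::this_thread::yield in place of os::naked_yield:

#include <atomic>
#include <thread>

class TTASLock {
  std::atomic<int> _state{0};  // 0 == unlocked, 1 == locked
public:
  void lock() {
    // Bounded spin budget; a rough stand-in for os::is_MP() ? 0xFF : 0.
    int ctr = std::thread::hardware_concurrency() > 1 ? 0xFF : 0;
    // TTAS: cheap load first, expensive CAS only when the lock looks free.
    int expected = 0;
    while (_state.load(std::memory_order_relaxed) == 1 ||
           !_state.compare_exchange_strong(expected, 1, std::memory_order_acquire)) {
      expected = 0;  // compare_exchange overwrites it on failure
      if (ctr > 0) {
        ctr--;       // lightly contended: keep spinning for a while
      } else {
        std::this_thread::yield();  // heavily contended: let others run
      }
    }
  }
  void unlock() {
    _state.store(0, std::memory_order_release);
  }
};

SpinPause() maps to a CPU-level pause hint (e.g. the x86 PAUSE instruction); the sketch's bare decrement stands in for it, since standard C++ has no portable equivalent.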
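The long comment documents the window between announcing a safepoint and arming the per-thread poll: entering and exiting ThreadBlockInVM inside that window achieves nothing, so the code parks in a yield loop until the poll arms. A toy model of that wait, with two std::atomic<bool> flags standing in for SafepointSynchronize::is_synchronizing() and SafepointMechanism::local_poll_armed() (these stand-ins are assumptions, not HotSpot API):

#include <atomic>
#include <thread>

std::atomic<bool> safepoint_announced{false};  // stand-in: is_synchronizing()
std::atomic<bool> poll_armed{false};           // stand-in: local_poll_armed()

void wait_until_poll_armed() {
  // Yield instead of spin-pausing: giving up the CPU lets the VM thread
  // finish arming the poll sooner, after which blocking can proceed.
  while (safepoint_announced.load() && !poll_armed.load()) {
    std::this_thread::yield();
  }
}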