
Commit bc0f02e

rmacnak-google authored and Commit Queue committed
[vm, gc] Incremental compaction.
At the beginning of a major GC cycle, select some mostly-empty pages to be evacuated. Mark the pages and the objects on these pages. Apply a write barrier for stores creating old -> evacuation-candidate pointers, and discover any such pointers that already exist during marking. At the end of a major GC cycle, evacuate objects from these pages. Forward pointers of objects in the remembered set and new-space. Free the evacuated pages.

This compaction is incremental in the sense that creating the remembered set is interleaved with mutator execution. The evacuation step, however, is stop-the-world.

Write-barrier elimination for x.slot = x is removed. Write-barrier elimination for x.slot = constant is removed in the JIT; it is kept for AOT, but snapshot pages are marked as never-evacuate.

TEST=ci
Bug: #52513
Change-Id: Icbc29ef7cb662ef8759b8c1d7a63b7af60766281
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/357760
Reviewed-by: Alexander Aprelev <[email protected]>
Commit-Queue: Ryan Macnak <[email protected]>
1 parent 45221e5 commit bc0f02e


42 files changed: +1695 / -144 lines
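For orientation, here is a minimal, self-contained sketch of the bookkeeping the commit message describes: stores that create old -> evacuation-candidate pointers are recorded while the mutator runs, so the stop-the-world evacuation step only needs to forward pointers from the recorded objects (plus new-space). All names and data structures below are illustrative; the real implementation lives in runtime/vm/heap/ and uses header tag bits rather than per-object fields.

```c++
// Hedged sketch of the incremental-compaction bookkeeping described above.
// Names are illustrative, not the SDK's actual classes.
#include <unordered_map>
#include <unordered_set>
#include <vector>

struct Object {
  bool is_old = false;
  bool is_evacuation_candidate = false;  // Set when its page is selected.
  std::vector<Object**> slots;           // Outgoing pointer fields.
};

struct Heap {
  std::unordered_set<Object*> remembered_set;

  // Write barrier: record old objects that point into candidate pages so the
  // evacuation phase only has to revisit these sources (plus new-space).
  void StorePointer(Object* container, Object** slot, Object* value) {
    *slot = value;
    if (container->is_old && value != nullptr &&
        value->is_evacuation_candidate) {
      remembered_set.insert(container);
    }
  }

  // Stop-the-world step: after candidates are copied, rewrite recorded slots.
  void ForwardRememberedSet(
      const std::unordered_map<Object*, Object*>& forwarding) {
    for (Object* container : remembered_set) {
      for (Object** slot : container->slots) {
        auto it = forwarding.find(*slot);
        if (it != forwarding.end()) *slot = it->second;
      }
    }
  }
};
```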

runtime/docs/gc.md

Lines changed: 6 additions & 7 deletions
@@ -95,18 +95,18 @@ But we combine the generational and incremental checks with a shift-and-mask.
 ```c++
 enum HeaderBits {
   ...
-  kNotMarkedBit,            // Incremental barrier target.
-  kNewBit,                  // Generational barrier target.
-  kAlwaysSetBit,            // Incremental barrier source.
-  kOldAndNotRememberedBit,  // Generational barrier source.
+  kNotMarkedBit,                 // Incremental barrier target.
+  kNewOrEvacuationCandidateBit,  // Generational barrier target.
+  kAlwaysSetBit,                 // Incremental barrier source.
+  kOldAndNotRememberedBit,       // Generational barrier source.
   ...
 };
 
-static constexpr intptr_t kGenerationalBarrierMask = 1 << kNewBit;
+static constexpr intptr_t kGenerationalBarrierMask = 1 << kNewOrEvacuationCandidateBit;
 static constexpr intptr_t kIncrementalBarrierMask = 1 << kNotMarkedBit;
 static constexpr intptr_t kBarrierOverlapShift = 2;
 COMPILE_ASSERT(kNotMarkedBit + kBarrierOverlapShift == kAlwaysSetBit);
-COMPILE_ASSERT(kNewBit + kBarrierOverlapShift == kOldAndNotRememberedBit);
+COMPILE_ASSERT(kNewOrEvacuationCandidateBit + kBarrierOverlapShift == kOldAndNotRememberedBit);
 
 StorePointer(ObjectPtr source, ObjectPtr* slot, ObjectPtr target) {
   *slot = target;
@@ -178,7 +178,6 @@ We can eliminate these checks when the compiler can prove these cases cannot hap
 * `value` is a constant. Constants are always old, and they will be marked via the constant pools even if we fail to mark them via `container`.
 * `value` has the static type bool. All possible values of the bool type (null, false, true) are constants.
 * `value` is known to be a Smi. Smis are not heap objects.
-* `container` is the same object as `value`. The GC never needs to retain an additional object if it sees a self-reference, so ignoring a self-reference cannot cause us to free a reachable object.
 * `container` is known to be a new object or known to be an old object that is in the remembered set and is marked if marking is in progress.
 
 We can know that `container` meets the last property if `container` is the result of an allocation (instead of a heap load), and there is no instruction that can trigger a GC between the allocation and the store. This is because the allocation stubs ensure the result of AllocateObject is either a new-space object (common case, bump pointer allocation succeeds), or has been preemptively added to the remembered set and marking worklist (uncommon case, entered runtime to allocate object, possibly triggering GC).
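The COMPILE_ASSERTs above are what make the combined barrier test a single shift-and-mask: shifting the container's tag word right by kBarrierOverlapShift lines its two barrier-source bits up with the corresponding barrier-target bits in the value's tag word, so one AND, further masked by the thread's current barrier mask, answers both the generational and the incremental question at once. A standalone illustration follows; the bit positions are made up but satisfy the same relationships, and the real check lives in the VM's StorePointer fast path.

```c++
// Self-contained illustration of the shift-and-mask barrier test described in
// gc.md above. Bit positions are illustrative, not the VM's actual layout.
#include <cstdint>

enum HeaderBits {
  kNotMarkedBit = 3,                 // Incremental barrier target.
  kNewOrEvacuationCandidateBit = 4,  // Generational barrier target.
  kAlwaysSetBit = 5,                 // Incremental barrier source.
  kOldAndNotRememberedBit = 6,       // Generational barrier source.
};

constexpr uintptr_t kGenerationalBarrierMask = 1u << kNewOrEvacuationCandidateBit;
constexpr uintptr_t kIncrementalBarrierMask = 1u << kNotMarkedBit;
constexpr int kBarrierOverlapShift = 2;
static_assert(kNotMarkedBit + kBarrierOverlapShift == kAlwaysSetBit, "");
static_assert(kNewOrEvacuationCandidateBit + kBarrierOverlapShift ==
                  kOldAndNotRememberedBit, "");

// Nonzero iff the store `*slot = target` needs barrier work. The thread's
// barrier mask is kGenerationalBarrierMask outside of marking and
// kGenerationalBarrierMask | kIncrementalBarrierMask while marking, so the
// incremental half of the test is switched off when no marking is running.
inline uintptr_t BarrierWork(uintptr_t source_tags, uintptr_t target_tags,
                             uintptr_t thread_barrier_mask) {
  return (source_tags >> kBarrierOverlapShift) & target_tags &
         thread_barrier_mask;
}
```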

runtime/platform/atomic.h

Lines changed: 2 additions & 2 deletions
@@ -80,8 +80,8 @@ class RelaxedAtomic {
   }
   T operator+=(T arg) { return fetch_add(arg) + arg; }
   T operator-=(T arg) { return fetch_sub(arg) - arg; }
-  T& operator++() { return fetch_add(1) + 1; }
-  T& operator--() { return fetch_sub(1) - 1; }
+  T operator++() { return fetch_add(1) + 1; }
+  T operator--() { return fetch_sub(1) - 1; }
   T operator++(int) { return fetch_add(1); }
   T operator--(int) { return fetch_sub(1); }
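The two-character change above fixes a real bug: `fetch_add(1) + 1` produces a temporary, so declaring pre-increment as returning `T&` handed callers a dangling reference. Returning `T` by value is the correct shape. A minimal stand-in (an assumed simplification of RelaxedAtomic, not the SDK's full class) for reference:

```c++
// Minimal stand-in showing the corrected operator signatures; the real class
// lives in runtime/platform/atomic.h.
#include <atomic>

template <typename T>
class RelaxedCounter {
 public:
  T fetch_add(T arg) { return value_.fetch_add(arg, std::memory_order_relaxed); }
  T fetch_sub(T arg) { return value_.fetch_sub(arg, std::memory_order_relaxed); }

  // Pre-increment must return the new value *by value*: the expression
  // `fetch_add(1) + 1` is a temporary, so returning T& would dangle.
  T operator++() { return fetch_add(1) + 1; }
  T operator--() { return fetch_sub(1) - 1; }

  // Post-increment/decrement return the old value.
  T operator++(int) { return fetch_add(1); }
  T operator--(int) { return fetch_sub(1); }

 private:
  std::atomic<T> value_{0};
};
```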

runtime/vm/app_snapshot.cc

Lines changed: 1 addition & 1 deletion
@@ -893,7 +893,7 @@ void Deserializer::InitializeHeader(ObjectPtr raw,
   tags = UntaggedObject::AlwaysSetBit::update(true, tags);
   tags = UntaggedObject::NotMarkedBit::update(true, tags);
   tags = UntaggedObject::OldAndNotRememberedBit::update(true, tags);
-  tags = UntaggedObject::NewBit::update(false, tags);
+  tags = UntaggedObject::NewOrEvacuationCandidateBit::update(false, tags);
   tags = UntaggedObject::ImmutableBit::update(is_immutable, tags);
   raw->untag()->tags_ = tags;
 }

runtime/vm/bitfield.h

Lines changed: 3 additions & 5 deletions
@@ -36,6 +36,9 @@ class AtomicBitFieldContainer : AtomicBitFieldContainerBase {
   }
 
   T load(std::memory_order order) const { return field_.load(order); }
+  NO_SANITIZE_THREAD T load_ignore_race() const {
+    return *reinterpret_cast<const T*>(&field_);
+  }
   void store(T value, std::memory_order order) { field_.store(value, order); }
 
   bool compare_exchange_weak(T old_tags, T new_tags, std::memory_order order) {
@@ -48,11 +51,6 @@ class AtomicBitFieldContainer : AtomicBitFieldContainerBase {
     return TargetBitField::decode(field_.load(order));
   }
 
-  template <class TargetBitField>
-  NO_SANITIZE_THREAD typename TargetBitField::Type ReadIgnoreRace() const {
-    return TargetBitField::decode(*reinterpret_cast<const T*>(&field_));
-  }
-
   template <class TargetBitField,
             std::memory_order order = std::memory_order_relaxed>
   void UpdateBool(bool value) {
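The net effect of this hunk is to replace the templated `ReadIgnoreRace<BitField>()` helper with a plain `load_ignore_race()` that returns the whole word, leaving the bit-field decode to the caller. A hedged sketch of that pattern (simplified container, hypothetical caller; NO_SANITIZE_THREAD is assumed to expand to the usual TSan suppression attribute):

```c++
// Simplified sketch: one deliberately non-atomic ("ignore race") load of the
// whole tag word, decoded by the caller.
#include <atomic>
#include <cstdint>

#if defined(__clang__)
#define NO_SANITIZE_THREAD __attribute__((no_sanitize("thread")))
#else
#define NO_SANITIZE_THREAD
#endif

class TagWord {
 public:
  uint32_t load(std::memory_order order) const { return field_.load(order); }
  NO_SANITIZE_THREAD uint32_t load_ignore_race() const {
    return *reinterpret_cast<const uint32_t*>(&field_);
  }

 private:
  std::atomic<uint32_t> field_{0};
};

// Hypothetical caller: decode a single bit out of the racy snapshot.
inline bool IsMarkedIgnoreRace(const TagWord& tags, int not_marked_bit) {
  return ((tags.load_ignore_race() >> not_marked_bit) & 1u) == 0;
}
```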

runtime/vm/compiler/assembler/assembler_arm.cc

Lines changed: 1 addition & 1 deletion
@@ -1933,7 +1933,7 @@ void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
   Label done;
   BranchIfSmi(value, &done, kNearJump);
   ldrb(TMP, FieldAddress(value, target::Object::tags_offset()));
-  tst(TMP, Operand(1 << target::UntaggedObject::kNewBit));
+  tst(TMP, Operand(1 << target::UntaggedObject::kNewOrEvacuationCandidateBit));
   b(&done, ZERO);
   ldrb(TMP, FieldAddress(object, target::Object::tags_offset()));
   tst(TMP, Operand(1 << target::UntaggedObject::kOldAndNotRememberedBit));

runtime/vm/compiler/assembler/assembler_arm64.cc

Lines changed: 1 addition & 1 deletion
@@ -1231,7 +1231,7 @@ void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
   Label done;
   BranchIfSmi(value, &done, kNearJump);
   ldr(TMP, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
-  tbz(&done, TMP, target::UntaggedObject::kNewBit);
+  tbz(&done, TMP, target::UntaggedObject::kNewOrEvacuationCandidateBit);
   ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
   tbz(&done, TMP, target::UntaggedObject::kOldAndNotRememberedBit);
   Stop("Write barrier is required");

runtime/vm/compiler/assembler/assembler_ia32.cc

Lines changed: 1 addition & 1 deletion
@@ -2207,7 +2207,7 @@ void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
   Label done;
   BranchIfSmi(value, &done, kNearJump);
   testb(FieldAddress(value, target::Object::tags_offset()),
-        Immediate(1 << target::UntaggedObject::kNewBit));
+        Immediate(1 << target::UntaggedObject::kNewOrEvacuationCandidateBit));
   j(ZERO, &done, Assembler::kNearJump);
   testb(FieldAddress(object, target::Object::tags_offset()),
         Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));

runtime/vm/compiler/assembler/assembler_riscv.cc

Lines changed: 1 addition & 1 deletion
@@ -3518,7 +3518,7 @@ void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
   Label done;
   BranchIfSmi(value, &done, kNearJump);
   lbu(TMP2, FieldAddress(value, target::Object::tags_offset()));
-  andi(TMP2, TMP2, 1 << target::UntaggedObject::kNewBit);
+  andi(TMP2, TMP2, 1 << target::UntaggedObject::kNewOrEvacuationCandidateBit);
   beqz(TMP2, &done, kNearJump);
   lbu(TMP2, FieldAddress(object, target::Object::tags_offset()));
   andi(TMP2, TMP2, 1 << target::UntaggedObject::kOldAndNotRememberedBit);

runtime/vm/compiler/assembler/assembler_x64.cc

Lines changed: 1 addition & 1 deletion
@@ -1684,7 +1684,7 @@ void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
   Label done;
   BranchIfSmi(value, &done, kNearJump);
   testb(FieldAddress(value, target::Object::tags_offset()),
-        Immediate(1 << target::UntaggedObject::kNewBit));
+        Immediate(1 << target::UntaggedObject::kNewOrEvacuationCandidateBit));
   j(ZERO, &done, Assembler::kNearJump);
   testb(FieldAddress(object, target::Object::tags_offset()),
         Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));
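All five assembler changes update the same debug-mode check: `VerifyStoreNeedsNoWriteBarrier` stops the VM if a store compiled without a barrier actually needed one, i.e. if a new or evacuation-candidate value is being stored into an old object that is not in the remembered set. The condition, written out as plain C++ over raw tag words (a sketch of the test, not the generated code):

```c++
// The condition the per-architecture VerifyStoreNeedsNoWriteBarrier snippets
// implement, as plain C++ (sketch only; bit indices are passed in).
#include <cstdint>

bool StoreWouldHaveNeededBarrier(uintptr_t object_tags, uintptr_t value_tags,
                                 bool value_is_smi,
                                 int new_or_evac_candidate_bit,
                                 int old_and_not_remembered_bit) {
  if (value_is_smi) return false;  // Immediates never need a barrier.
  bool value_needs_tracking = (value_tags >> new_or_evac_candidate_bit) & 1u;
  bool object_forgotten = (object_tags >> old_and_not_remembered_bit) & 1u;
  // A barrier was required iff a new/evacuation-candidate value is stored
  // into an old object that is not in the remembered set.
  return value_needs_tracking && object_forgotten;
}
```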

runtime/vm/compiler/backend/il.cc

Lines changed: 10 additions & 3 deletions
@@ -1392,10 +1392,17 @@ bool Value::NeedsWriteBarrier() {
 
   // Strictly speaking, the incremental barrier can only be skipped for
   // immediate objects (Smis) or permanent objects (vm-isolate heap or
-  // image pages). Here we choose to skip the barrier for any constant on
-  // the assumption it will remain reachable through the object pool.
+  // image pages). For AOT, we choose to skip the barrier for any constant on
+  // the assumptions it will remain reachable through the object pool and it
+  // is on a page created by snapshot loading that is marked so as to never be
+  // evacuated.
   if (value->BindsToConstant()) {
-    return false;
+    if (FLAG_precompiled_mode) {
+      return false;
+    } else {
+      const Object& constant = value->BoundConstant();
+      return constant.ptr()->IsHeapObject() && !constant.InVMIsolateHeap();
+    }
   }
 
   // Follow the chain of redefinitions as redefined value could have a more
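Restated outside the IL classes, the new constant policy in `Value::NeedsWriteBarrier` is: AOT can keep skipping the barrier because constants live on snapshot pages that are marked never-evacuate, while the JIT may now only skip it for immediates and permanent vm-isolate objects. A sketch with illustrative parameter names:

```c++
// Sketch of the constant-value branch added above; in the real code the
// inputs come from FLAG_precompiled_mode and the bound constant itself.
bool ConstantNeedsWriteBarrier(bool precompiled_mode,
                               bool constant_is_heap_object,
                               bool constant_in_vm_isolate_heap) {
  if (precompiled_mode) {
    // AOT: constants live on snapshot pages that are never evacuated, so the
    // barrier can still be skipped.
    return false;
  }
  // JIT: only immediates (Smis) and permanent vm-isolate objects are safe to
  // skip now that ordinary old pages may become evacuation candidates.
  return constant_is_heap_object && !constant_in_vm_isolate_heap;
}
```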

runtime/vm/compiler/backend/il.h

Lines changed: 0 additions & 11 deletions
@@ -6417,11 +6417,6 @@ class StoreFieldInstr : public TemplateInstruction<2, NoThrow> {
       // The target field is native and unboxed, so not traversed by the GC.
       return false;
     }
-    if (instance()->definition() == value()->definition()) {
-      // `x.slot = x` cannot create an old->new or old&marked->old&unmarked
-      // reference.
-      return false;
-    }
 
     if (value()->definition()->Type()->IsBool()) {
       return false;
@@ -7074,12 +7069,6 @@ class StoreIndexedInstr : public TemplateInstruction<3, NoThrow> {
   bool aligned() const { return alignment_ == kAlignedAccess; }
 
   bool ShouldEmitStoreBarrier() const {
-    if (array()->definition() == value()->definition()) {
-      // `x[slot] = x` cannot create an old->new or old&marked->old&unmarked
-      // reference.
-      return false;
-    }
-
     if (value()->definition()->Type()->IsBool()) {
       return false;
     }

runtime/vm/compiler/frontend/base_flow_graph_builder.cc

Lines changed: 2 additions & 4 deletions
@@ -522,11 +522,9 @@ Fragment BaseFlowGraphBuilder::StoreNativeField(
     StoreBarrierType emit_store_barrier /* = kEmitStoreBarrier */,
     compiler::Assembler::MemoryOrder memory_order /* = kRelaxed */) {
   Value* value = Pop();
-  if (value->BindsToConstant()) {
-    emit_store_barrier = kNoStoreBarrier;
-  }
+  Value* instance = Pop();
   StoreFieldInstr* store = new (Z)
-      StoreFieldInstr(slot, Pop(), value, emit_store_barrier,
+      StoreFieldInstr(slot, instance, value, emit_store_barrier,
                       stores_inner_pointer, InstructionSource(position), kind);
   return Fragment(store);
 }

runtime/vm/compiler/runtime_api.cc

Lines changed: 3 additions & 2 deletions
@@ -361,7 +361,7 @@ uword MakeTagWordForNewSpaceObject(classid_t cid, uword instance_size) {
   return dart::UntaggedObject::SizeTag::encode(
              TranslateOffsetInWordsToHost(instance_size)) |
          dart::UntaggedObject::ClassIdTag::encode(cid) |
-         dart::UntaggedObject::NewBit::encode(true) |
+         dart::UntaggedObject::NewOrEvacuationCandidateBit::encode(true) |
          dart::UntaggedObject::AlwaysSetBit::encode(true) |
          dart::UntaggedObject::NotMarkedBit::encode(true) |
          dart::UntaggedObject::ImmutableBit::encode(
@@ -377,7 +377,8 @@ const word UntaggedObject::kCardRememberedBit =
 
 const word UntaggedObject::kCanonicalBit = dart::UntaggedObject::kCanonicalBit;
 
-const word UntaggedObject::kNewBit = dart::UntaggedObject::kNewBit;
+const word UntaggedObject::kNewOrEvacuationCandidateBit =
+    dart::UntaggedObject::kNewOrEvacuationCandidateBit;
 
 const word UntaggedObject::kOldAndNotRememberedBit =
     dart::UntaggedObject::kOldAndNotRememberedBit;

runtime/vm/compiler/runtime_api.h

Lines changed: 1 addition & 1 deletion
@@ -418,7 +418,7 @@ class UntaggedObject : public AllStatic {
  public:
   static const word kCardRememberedBit;
   static const word kCanonicalBit;
-  static const word kNewBit;
+  static const word kNewOrEvacuationCandidateBit;
   static const word kOldAndNotRememberedBit;
   static const word kNotMarkedBit;
   static const word kImmutableBit;

runtime/vm/flag_list.h

Lines changed: 4 additions & 2 deletions
@@ -127,8 +127,8 @@ constexpr bool FLAG_support_il_printer = false;
   R(log_marker_tasks, false, bool, false, \
     "Log debugging information for old gen GC marking tasks.") \
   P(scavenger_tasks, int, 2, \
-    "The number of tasks to spawn during scavenging (0 means " \
-    "perform all marking on main thread).") \
+    "The number of tasks to spawn during scavenging and incremental " \
+    "compaction (0 means perform all work on the main thread).") \
   P(mark_when_idle, bool, false, \
     "The Dart thread will assist in concurrent marking during idle time and " \
     "is counted as one marker task") \
@@ -216,6 +216,8 @@ constexpr bool FLAG_support_il_printer = false;
   P(truncating_left_shift, bool, true, \
     "Optimize left shift to truncate if possible") \
   P(use_compactor, bool, false, "Compact the heap during old-space GC.") \
+  P(use_incremental_compactor, bool, true, \
+    "Compact the heap during old-space GC.") \
   P(use_cha_deopt, bool, true, \
     "Use class hierarchy analysis even if it can cause deoptimization.") \
   P(use_field_guards, bool, true, "Use field guards and track field types") \

runtime/vm/heap/become.cc

Lines changed: 1 addition & 1 deletion
@@ -29,7 +29,7 @@ ForwardingCorpse* ForwardingCorpse::AsForwarder(uword addr, intptr_t size) {
   bool is_old = (addr & kNewObjectAlignmentOffset) == kOldObjectAlignmentOffset;
   tags = UntaggedObject::NotMarkedBit::update(true, tags);
   tags = UntaggedObject::OldAndNotRememberedBit::update(is_old, tags);
-  tags = UntaggedObject::NewBit::update(!is_old, tags);
+  tags = UntaggedObject::NewOrEvacuationCandidateBit::update(!is_old, tags);
 
   result->tags_ = tags;
   if (size > UntaggedObject::SizeTag::kMaxSizeTag) {
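A side note on the `is_old` expression in this hunk: the VM recovers an object's generation from its address alone, because new-space and old-space objects are kept at different offsets modulo the object alignment. A sketch with assumed constant values:

```c++
// Assumed values for illustration; the real constants come from the VM's
// object alignment configuration.
#include <cstdint>

constexpr uintptr_t kNewObjectAlignmentOffset = 8;  // assumption
constexpr uintptr_t kOldObjectAlignmentOffset = 0;  // assumption

// Mirrors the expression in AsForwarder: an address belongs to an old-space
// object iff its "new offset" bit is clear.
inline bool IsOldObjectAddress(uintptr_t addr) {
  return (addr & kNewObjectAlignmentOffset) == kOldObjectAlignmentOffset;
}
```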

runtime/vm/heap/compactor.cc

Lines changed: 75 additions & 4 deletions
@@ -6,9 +6,9 @@
 
 #include "platform/atomic.h"
 #include "vm/globals.h"
-#include "vm/heap/become.h"
 #include "vm/heap/heap.h"
 #include "vm/heap/pages.h"
+#include "vm/heap/sweeper.h"
 #include "vm/thread_barrier.h"
 #include "vm/timeline.h"
 
@@ -184,18 +184,52 @@ class CompactorTask : public ThreadPool::Task {
 void GCCompactor::Compact(Page* pages, FreeList* freelist, Mutex* pages_lock) {
   SetupImagePageBoundaries();
 
-  // Divide the heap.
+  Page* fixed_head = nullptr;
+  Page* fixed_tail = nullptr;
+
+  // Divide the heap, and set aside never-evacuate pages.
   // TODO(30978): Try to divide based on live bytes or with work stealing.
   intptr_t num_pages = 0;
-  for (Page* page = pages; page != nullptr; page = page->next()) {
-    num_pages++;
+  Page* page = pages;
+  Page* prev = nullptr;
+  while (page != nullptr) {
+    Page* next = page->next();
+    if (page->is_never_evacuate()) {
+      if (prev != nullptr) {
+        prev->set_next(next);
+      } else {
+        pages = next;
+      }
+      if (fixed_tail == nullptr) {
+        fixed_tail = page;
+      }
+      page->set_next(fixed_head);
+      fixed_head = page;
+    } else {
+      prev = page;
+      num_pages++;
+    }
+    page = next;
   }
+  fixed_pages_ = fixed_head;
 
   intptr_t num_tasks = FLAG_compactor_tasks;
   RELEASE_ASSERT(num_tasks >= 1);
   if (num_pages < num_tasks) {
     num_tasks = num_pages;
   }
+  if (num_tasks == 0) {
+    ASSERT(pages == nullptr);
+
+    // Move pages to sweeper work lists.
+    heap_->old_space()->pages_ = nullptr;
+    heap_->old_space()->pages_tail_ = nullptr;
+    heap_->old_space()->sweep_regular_ = fixed_head;
+
+    heap_->old_space()->Sweep(/*exclusive*/ true);
+    heap_->old_space()->SweepLarge();
+    return;
+  }
 
   Partition* partitions = new Partition[num_tasks];
 
@@ -206,6 +240,7 @@ void GCCompactor::Compact(Page* pages, FreeList* freelist, Mutex* pages_lock) {
   Page* page = pages;
   Page* prev = nullptr;
   while (task_index < num_tasks) {
+    ASSERT(!page->is_never_evacuate());
     if (page_index % pages_per_task == 0) {
       partitions[task_index].head = page;
       partitions[task_index].tail = nullptr;
@@ -352,6 +387,12 @@ void GCCompactor::Compact(Page* pages, FreeList* freelist, Mutex* pages_lock) {
   partitions[num_tasks - 1].tail->set_next(nullptr);
   heap_->old_space()->pages_ = pages = partitions[0].head;
   heap_->old_space()->pages_tail_ = partitions[num_tasks - 1].tail;
+  if (fixed_head != nullptr) {
+    fixed_tail->set_next(heap_->old_space()->pages_);
+    heap_->old_space()->pages_ = fixed_head;
+
+    ASSERT(heap_->old_space()->pages_tail_ != nullptr);
+  }
 
   delete[] partitions;
 }
@@ -486,6 +527,7 @@ void CompactorTask::RunEnteredIsolateGroup() {
 }
 
 void CompactorTask::PlanPage(Page* page) {
+  ASSERT(!page->is_never_evacuate());
   uword current = page->object_start();
   uword end = page->object_end();
 
@@ -498,6 +540,7 @@ void CompactorTask::PlanPage(Page* page) {
 }
 
 void CompactorTask::SlidePage(Page* page) {
+  ASSERT(!page->is_never_evacuate());
   uword current = page->object_start();
   uword end = page->object_end();
 
@@ -667,6 +710,11 @@ void GCCompactor::ForwardPointer(ObjectPtr* ptr) {
   if (forwarding_page == nullptr) {
     return;  // Not moved (VM isolate, large page, code page).
   }
+  if (page->is_never_evacuate()) {
+    // Forwarding page is non-NULL since one is still reserved for use as a
+    // counting page, but it doesn't have forwarding information.
+    return;
+  }
 
   ObjectPtr new_target =
       UntaggedObject::FromAddr(forwarding_page->Lookup(old_addr));
@@ -703,6 +751,11 @@ void GCCompactor::ForwardCompressedPointer(uword heap_base,
   if (forwarding_page == nullptr) {
     return;  // Not moved (VM isolate, large page, code page).
   }
+  if (page->is_never_evacuate()) {
+    // Forwarding page is non-NULL since one is still reserved for use as a
+    // counting page, but it doesn't have forwarding information.
+    return;
+  }
 
   ObjectPtr new_target =
       UntaggedObject::FromAddr(forwarding_page->Lookup(old_addr));
@@ -796,6 +849,24 @@ void GCCompactor::ForwardLargePages() {
     page->VisitObjectPointers(this);
     ml.Lock();
   }
+  while (fixed_pages_ != nullptr) {
+    Page* page = fixed_pages_;
+    fixed_pages_ = page->next();
+    ml.Unlock();
+
+    GCSweeper sweeper;
+    FreeList* freelist = heap_->old_space()->DataFreeList(0);
+    bool page_in_use;
+    {
+      MutexLocker ml(freelist->mutex());
+      page_in_use = sweeper.SweepPage(page, freelist);
+    }
+    ASSERT(page_in_use);
+
+    page->VisitObjectPointers(this);
+
+    ml.Lock();
+  }
 }
 
 void GCCompactor::ForwardStackPointers() {

runtime/vm/heap/compactor.h

Lines changed: 1 addition & 0 deletions
@@ -74,6 +74,7 @@ class GCCompactor : public ValueObject,
 
   Mutex large_pages_mutex_;
   Page* large_pages_ = nullptr;
+  Page* fixed_pages_ = nullptr;
 
   // The typed data views whose inner pointer must be updated after sliding is
   // complete.
