 #include "bytemap.h"
 #include "common.h"
 #include "list.h"
-#include "local_cache.h"
 #include "options.h"
 #include "release.h"
 #include "report.h"
+#include "size_class_allocator.h"
 #include "stats.h"
 #include "string_utils.h"
 #include "thread_annotations.h"
@@ -52,7 +52,10 @@ template <typename Config> class SizeClassAllocator32 {
   static_assert((1UL << Config::getRegionSizeLog()) >= SizeClassMap::MaxSize,
                 "");
   typedef SizeClassAllocator32<Config> ThisT;
-  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
+  using SizeClassAllocatorT =
+      typename Conditional<Config::getEnableBlockCache(),
+                           SizeClassAllocatorLocalCache<ThisT>,
+                           SizeClassAllocatorNoCache<ThisT>>::type;
   typedef TransferBatch<ThisT> TransferBatchT;
   typedef BatchGroup<ThisT> BatchGroupT;
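
`Conditional` is a compile-time type selector in the spirit of
`std::conditional`; scudo avoids the STL internally, so it carries its own.
A minimal sketch of such a selector, inferred from the usage above rather
than taken from scudo's headers:

    // Minimal stand-in for a Conditional type selector, showing how the
    // alias above resolves. Not scudo's actual definition.
    template <bool B, typename TrueT, typename FalseT> struct Conditional {
      using type = TrueT; // Picked when B is true.
    };
    template <typename TrueT, typename FalseT>
    struct Conditional<false, TrueT, FalseT> {
      using type = FalseT; // Picked when B is false.
    };

Because `Config::getEnableBlockCache()` appears as a template argument, it
must be a constant expression, so the choice between
`SizeClassAllocatorLocalCache` and `SizeClassAllocatorNoCache` is made
entirely at compile time and adds no dispatch cost on the allocation path.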
@@ -191,25 +194,28 @@ template <typename Config> class SizeClassAllocator32 {
     return BlockSize > PageSize;
   }

-  u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
-                const u16 MaxBlockCount) {
+  u16 popBlocks(SizeClassAllocatorT *SizeClassAllocator, uptr ClassId,
+                CompactPtrT *ToArray, const u16 MaxBlockCount) {
     DCHECK_LT(ClassId, NumClasses);
     SizeClassInfo *Sci = getSizeClassInfo(ClassId);
     ScopedLock L(Sci->Mutex);

-    u16 PopCount = popBlocksImpl(C, ClassId, Sci, ToArray, MaxBlockCount);
+    u16 PopCount =
+        popBlocksImpl(SizeClassAllocator, ClassId, Sci, ToArray, MaxBlockCount);
     if (UNLIKELY(PopCount == 0)) {
-      if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
+      if (UNLIKELY(!populateFreeList(SizeClassAllocator, ClassId, Sci)))
         return 0U;
-      PopCount = popBlocksImpl(C, ClassId, Sci, ToArray, MaxBlockCount);
+      PopCount = popBlocksImpl(SizeClassAllocator, ClassId, Sci, ToArray,
+                               MaxBlockCount);
       DCHECK_NE(PopCount, 0U);
     }

     return PopCount;
   }

   // Push the array of free blocks to the designated batch group.
-  void pushBlocks(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size) {
+  void pushBlocks(SizeClassAllocatorT *SizeClassAllocator, uptr ClassId,
+                  CompactPtrT *Array, u32 Size) {
     DCHECK_LT(ClassId, NumClasses);
     DCHECK_GT(Size, 0);
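
The pop path above retries exactly once: if the free list is empty,
`populateFreeList()` carves a fresh region, and only then is the pop
reattempted. A hypothetical call site, to show what the signature change
means for callers (`Primary`, `SizeClassAllocator`, and the block count are
illustrative names and values, not the actual frontend code):

    // Illustrative only: callers now pass their SizeClassAllocatorT
    // (whichever flavor Conditional selected) instead of a CacheT.
    CompactPtrT Blocks[64];
    const u16 Popped = Primary.popBlocks(&SizeClassAllocator, ClassId, Blocks,
                                         /*MaxBlockCount=*/64U);
    if (Popped == 0) {
      // populateFreeList() failed; the primary has no memory left for
      // this size class.
    }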
@@ -240,7 +246,7 @@ template <typename Config> class SizeClassAllocator32 {
     }

     ScopedLock L(Sci->Mutex);
-    pushBlocksImpl(C, ClassId, Sci, Array, Size, SameGroup);
+    pushBlocksImpl(SizeClassAllocator, ClassId, Sci, Array, Size, SameGroup);
   }

   void disable() NO_THREAD_SAFETY_ANALYSIS {
@@ -529,8 +535,8 @@ template <typename Config> class SizeClassAllocator32 {
       // memory group here.
       BG->CompactPtrGroupBase = 0;
       BG->BytesInBGAtLastCheckpoint = 0;
-      BG->MaxCachedPerBatch =
-          CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));
+      BG->MaxCachedPerBatch = SizeClassAllocatorT::getMaxCached(
+          getSizeByClassId(SizeClassMap::BatchClassId));

       Sci->FreeListInfo.BlockList.push_front(BG);
     }
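
Note that `getMaxCached()` is now resolved through `SizeClassAllocatorT`
rather than `CacheT`. For this to compile with either branch of the
`Conditional`, both cache flavors must expose the same surface. Inferred
purely from the call sites in this diff (signatures are approximations, not
the actual declarations), that shared interface is roughly:

    static u16 getMaxCached(uptr Size);       // per-class caching bound
    void *getBatchClassBlock();               // block for TB/BG metadata
    void deallocate(uptr ClassId, void *Ptr); // return a metadata block
    LocalStats &getStats();                   // StatMapped/StatFree updates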
@@ -597,18 +603,18 @@ template <typename Config> class SizeClassAllocator32 {
   // same group then we will skip checking the group id of each block.
   //
   // The region mutex needs to be held while calling this method.
-  void pushBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
-                      CompactPtrT *Array, u32 Size, bool SameGroup = false)
-      REQUIRES(Sci->Mutex) {
+  void pushBlocksImpl(SizeClassAllocatorT *SizeClassAllocator, uptr ClassId,
+                      SizeClassInfo *Sci, CompactPtrT *Array, u32 Size,
+                      bool SameGroup = false) REQUIRES(Sci->Mutex) {
     DCHECK_NE(ClassId, SizeClassMap::BatchClassId);
     DCHECK_GT(Size, 0U);

     auto CreateGroup = [&](uptr CompactPtrGroupBase) {
-      BatchGroupT *BG =
-          reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
+      BatchGroupT *BG = reinterpret_cast<BatchGroupT *>(
+          SizeClassAllocator->getBatchClassBlock());
       BG->Batches.clear();
-      TransferBatchT *TB =
-          reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
+      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
+          SizeClassAllocator->getBatchClassBlock());
       TB->clear();

       BG->CompactPtrGroupBase = CompactPtrGroupBase;
@@ -629,8 +635,8 @@ template <typename Config> class SizeClassAllocator32 {
       u16 UnusedSlots =
           static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
       if (UnusedSlots == 0) {
-        CurBatch =
-            reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
+        CurBatch = reinterpret_cast<TransferBatchT *>(
+            SizeClassAllocator->getBatchClassBlock());
         CurBatch->clear();
         Batches.push_front(CurBatch);
         UnusedSlots = BG->MaxCachedPerBatch;
@@ -704,9 +710,9 @@ template <typename Config> class SizeClassAllocator32 {
       InsertBlocks(Cur, Array + Size - Count, Count);
   }

-  u16 popBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
-                    CompactPtrT *ToArray, const u16 MaxBlockCount)
-      REQUIRES(Sci->Mutex) {
+  u16 popBlocksImpl(SizeClassAllocatorT *SizeClassAllocator, uptr ClassId,
+                    SizeClassInfo *Sci, CompactPtrT *ToArray,
+                    const u16 MaxBlockCount) REQUIRES(Sci->Mutex) {
     if (Sci->FreeListInfo.BlockList.empty())
       return 0U;
@@ -730,11 +736,11 @@ template <typename Config> class SizeClassAllocator32 {
     // So far, instead of always filling the blocks to `MaxBlockCount`, we only
     // examine single `TransferBatch` to minimize the time spent on the primary
     // allocator. Besides, the sizes of `TransferBatch` and
-    // `CacheT::getMaxCached()` may also impact the time spent on accessing the
-    // primary allocator.
+    // `SizeClassAllocatorT::getMaxCached()` may also impact the time spent on
+    // accessing the primary allocator.
     // TODO(chiahungduan): Evaluate if we want to always prepare `MaxBlockCount`
     // blocks and/or adjust the size of `TransferBatch` according to
-    // `CacheT::getMaxCached()`.
+    // `SizeClassAllocatorT::getMaxCached()`.
     TransferBatchT *B = Batches.front();
     DCHECK_NE(B, nullptr);
     DCHECK_GT(B->getCount(), 0U);
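
To make the trade-off in the comment concrete (the numbers are illustrative):
if `SizeClassAllocatorT::getMaxCached()` yields 13 for this size class while
the caller asks for `MaxBlockCount == 16`, inspecting only the front
`TransferBatch` can return at most 13 blocks, leaving the caller's array
partially filled rather than paying for a second batch lookup on this path.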
@@ -754,7 +760,7 @@ template <typename Config> class SizeClassAllocator32 {
       // deallocate. Read the comment in `pushBatchClassBlocks()` for more
       // details.
       if (ClassId != SizeClassMap::BatchClassId)
-        C->deallocate(SizeClassMap::BatchClassId, B);
+        SizeClassAllocator->deallocate(SizeClassMap::BatchClassId, B);

       if (Batches.empty()) {
         BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
@@ -766,15 +772,16 @@ template <typename Config> class SizeClassAllocator32 {
         // Which means, once we pop the last TransferBatch, the block is
         // implicitly deallocated.
         if (ClassId != SizeClassMap::BatchClassId)
-          C->deallocate(SizeClassMap::BatchClassId, BG);
+          SizeClassAllocator->deallocate(SizeClassMap::BatchClassId, BG);
       }
     }

     Sci->FreeListInfo.PoppedBlocks += PopCount;
     return PopCount;
   }

-  NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
+  NOINLINE bool populateFreeList(SizeClassAllocatorT *SizeClassAllocator,
+                                 uptr ClassId, SizeClassInfo *Sci)
       REQUIRES(Sci->Mutex) {
     uptr Region;
     uptr Offset;
@@ -791,13 +798,13 @@ template <typename Config> class SizeClassAllocator32 {
       Region = allocateRegion(Sci, ClassId);
       if (UNLIKELY(!Region))
         return false;
-      C->getStats().add(StatMapped, RegionSize);
+      SizeClassAllocator->getStats().add(StatMapped, RegionSize);
       Sci->CurrentRegion = Region;
       Offset = 0;
     }

     const uptr Size = getSizeByClassId(ClassId);
-    const u16 MaxCount = CacheT::getMaxCached(Size);
+    const u16 MaxCount = SizeClassAllocatorT::getMaxCached(Size);
     DCHECK_GT(MaxCount, 0U);
     // The maximum number of blocks we should carve in the region is dictated
     // by the maximum number of batches we want to fill, and the amount of
@@ -827,7 +834,8 @@ template <typename Config> class SizeClassAllocator32 {
       for (u32 I = 1; I < NumberOfBlocks; I++) {
         if (UNLIKELY(compactPtrGroupBase(ShuffleArray[I]) != CurGroup)) {
           shuffle(ShuffleArray + I - N, N, &Sci->RandState);
-          pushBlocksImpl(C, ClassId, Sci, ShuffleArray + I - N, N,
+          pushBlocksImpl(SizeClassAllocator, ClassId, Sci, ShuffleArray + I - N,
+                         N,
                          /*SameGroup=*/true);
           N = 1;
           CurGroup = compactPtrGroupBase(ShuffleArray[I]);
@@ -837,7 +845,8 @@ template <typename Config> class SizeClassAllocator32 {
       }

       shuffle(ShuffleArray + NumberOfBlocks - N, N, &Sci->RandState);
-      pushBlocksImpl(C, ClassId, Sci, &ShuffleArray[NumberOfBlocks - N], N,
+      pushBlocksImpl(SizeClassAllocator, ClassId, Sci,
+                     &ShuffleArray[NumberOfBlocks - N], N,
                      /*SameGroup=*/true);
     } else {
       pushBatchClassBlocks(Sci, ShuffleArray, NumberOfBlocks);
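
As an illustration of the group-wise push (numbers invented): if a freshly
carved region yields 200 blocks spanning two compact-pointer groups, the
loop above pushes the first group's run as soon as it encounters a block
whose `compactPtrGroupBase()` differs, and the trailing run is pushed after
the loop; both calls pass `/*SameGroup=*/true` so `pushBlocksImpl()` can
skip the per-block group check.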
@@ -850,7 +859,7 @@ template <typename Config> class SizeClassAllocator32 {
     Sci->FreeListInfo.PushedBlocks -= NumberOfBlocks;

     const uptr AllocatedUser = Size * NumberOfBlocks;
-    C->getStats().add(StatFree, AllocatedUser);
+    SizeClassAllocator->getStats().add(StatFree, AllocatedUser);
     DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize);
     // If there is not enough room in the region currently associated to fit
     // more blocks, we deassociate the region by resetting CurrentRegion and