@@ -34,7 +34,7 @@ namespace scudo {
3434// predictable address pattern (the predictability increases with the block
3535// size).
3636//
37- // Regions for size class 0 are special and used to hold TransferBatches , which
37+ // Regions for size class 0 are special and used to hold Batches, which
3838// allow to transfer arrays of pointers from the global size class freelist to
3939// the thread specific freelist for said class, and back.
4040//
@@ -56,12 +56,21 @@ template <typename Config> class SizeClassAllocator32 {
5656 typename Conditional<Config::getEnableBlockCache(),
5757 SizeClassAllocatorLocalCache<ThisT>,
5858 SizeClassAllocatorNoCache<ThisT>>::type;
59- typedef TransferBatch <ThisT> TransferBatchT ;
59+ typedef Batch <ThisT> BatchT ;
6060 typedef BatchGroup<ThisT> BatchGroupT;
61+ static const u16 MaxNumBlocksInBatch = SizeClassMap::MaxNumCachedHint;
62+
63+ static constexpr uptr getSizeOfBatchClass () {
64+ const uptr HeaderSize = sizeof (BatchT);
65+ return HeaderSize + sizeof (CompactPtrT) * MaxNumBlocksInBatch;
66+ }
67+
68+ static_assert (sizeof (BatchGroupT) <= getSizeOfBatchClass(),
69+ "BatchGroupT also uses BatchClass");
6170
6271 static uptr getSizeByClassId (uptr ClassId) {
6372 return (ClassId == SizeClassMap::BatchClassId)
64- ? Max ( sizeof (BatchGroupT), sizeof (TransferBatchT) )
73+ ? getSizeOfBatchClass ( )
6574 : SizeClassMap::getSizeByClassId (ClassId);
6675 }
6776
@@ -124,7 +133,7 @@ template <typename Config> class SizeClassAllocator32 {
124133
125134 // When all blocks are freed, it has to be the same size as `AllocatedUser`.
126135 void verifyAllBlocksAreReleasedTestOnly () {
127- // `BatchGroup` and `TransferBatch ` also use the blocks from BatchClass.
136+ // `BatchGroup` and `Batch` also use the blocks from BatchClass.
128137 uptr BatchClassUsedInFreeLists = 0 ;
129138 for (uptr I = 0 ; I < NumClasses; I++) {
130139 // We have to count BatchClassUsedInFreeLists in other regions first.
@@ -134,7 +143,7 @@ template <typename Config> class SizeClassAllocator32 {
134143 ScopedLock L1 (Sci->Mutex );
135144 uptr TotalBlocks = 0 ;
136145 for (BatchGroupT &BG : Sci->FreeListInfo .BlockList ) {
137- // `BG::Batches` are `TransferBatches `. +1 for `BatchGroup`.
146+ // `BG::Batches` are `Batches`. +1 for `BatchGroup`.
138147 BatchClassUsedInFreeLists += BG.Batches .size () + 1 ;
139148 for (const auto &It : BG.Batches )
140149 TotalBlocks += It.getCount ();
@@ -153,7 +162,7 @@ template <typename Config> class SizeClassAllocator32 {
153162 for (const auto &It : BG.Batches )
154163 TotalBlocks += It.getCount ();
155164 } else {
156- // `BatchGroup` with empty freelist doesn't have `TransferBatch ` record
165+ // `BatchGroup` with empty freelist doesn't have `Batch` record
157166 // itself.
158167 ++TotalBlocks;
159168 }
@@ -487,39 +496,39 @@ template <typename Config> class SizeClassAllocator32 {
487496 REQUIRES(Sci->Mutex) {
488497 DCHECK_EQ (Sci, getSizeClassInfo (SizeClassMap::BatchClassId));
489498
490- // Free blocks are recorded by TransferBatch in freelist for all
491- // size-classes. In addition, TransferBatch is allocated from BatchClassId.
499+ // Free blocks are recorded by Batch in freelist for all
500+ // size-classes. In addition, Batch is allocated from BatchClassId.
492501 // In order not to use additional block to record the free blocks in
493- // BatchClassId, they are self-contained. I.e., A TransferBatch records the
502+ // BatchClassId, they are self-contained. I.e., A Batch records the
494503 // block address of itself. See the figure below:
495504 //
496- // TransferBatch at 0xABCD
505+ // Batch at 0xABCD
497506 // +----------------------------+
498507 // | Free blocks' addr |
499508 // | +------+------+------+ |
500509 // | |0xABCD|... |... | |
501510 // | +------+------+------+ |
502511 // +----------------------------+
503512 //
504- // When we allocate all the free blocks in the TransferBatch , the block used
505- // by TransferBatch is also free for use. We don't need to recycle the
506- // TransferBatch . Note that the correctness is maintained by the invariant,
513+ // When we allocate all the free blocks in the Batch, the block used
514+ // by Batch is also free for use. We don't need to recycle the
515+ // Batch. Note that the correctness is maintained by the invariant,
507516 //
508- // Each popBlocks() request returns the entire TransferBatch . Returning
509- // part of the blocks in a TransferBatch is invalid.
517+ // Each popBlocks() request returns the entire Batch. Returning
518+ // part of the blocks in a Batch is invalid.
510519 //
511- // This ensures that TransferBatch won't leak the address itself while it's
520+ // This ensures that Batch won't leak the address itself while it's
512521 // still holding other valid data.
513522 //
514523 // Besides, BatchGroup is also allocated from BatchClassId and has its
515- // address recorded in the TransferBatch too. To maintain the correctness,
524+ // address recorded in the Batch too. To maintain the correctness,
516525 //
517- // The address of BatchGroup is always recorded in the last TransferBatch
526+ // The address of BatchGroup is always recorded in the last Batch
518527 // in the freelist (also imply that the freelist should only be
519- // updated with push_front). Once the last TransferBatch is popped,
528+ // updated with push_front). Once the last Batch is popped,
520529 // the block used by BatchGroup is also free for use.
521530 //
522- // With this approach, the blocks used by BatchGroup and TransferBatch are
531+ // With this approach, the blocks used by BatchGroup and Batch are
523532 // reusable and don't need additional space for them.
524533
525534 Sci->FreeListInfo .PushedBlocks += Size;
@@ -548,27 +557,27 @@ template <typename Config> class SizeClassAllocator32 {
548557 // 1. just allocated a new `BatchGroup`.
549558 // 2. Only 1 block is pushed when the freelist is empty.
550559 if (BG->Batches .empty ()) {
551- // Construct the `TransferBatch ` on the last element.
552- TransferBatchT *TB = reinterpret_cast <TransferBatchT *>(
560+ // Construct the `Batch` on the last element.
561+ BatchT *TB = reinterpret_cast <BatchT *>(
553562 decompactPtr (SizeClassMap::BatchClassId, Array[Size - 1 ]));
554563 TB->clear ();
555- // As mentioned above, addresses of `TransferBatch ` and `BatchGroup` are
556- // recorded in the TransferBatch .
564+ // As mentioned above, addresses of `Batch` and `BatchGroup` are
565+ // recorded in the Batch.
557566 TB->add (Array[Size - 1 ]);
558567 TB->add (
559568 compactPtr (SizeClassMap::BatchClassId, reinterpret_cast <uptr>(BG)));
560569 --Size;
561570 BG->Batches .push_front (TB);
562571 }
563572
564- TransferBatchT *CurBatch = BG->Batches .front ();
573+ BatchT *CurBatch = BG->Batches .front ();
565574 DCHECK_NE (CurBatch, nullptr );
566575
567576 for (u32 I = 0 ; I < Size;) {
568577 u16 UnusedSlots =
569578 static_cast <u16 >(BG->MaxCachedPerBatch - CurBatch->getCount ());
570579 if (UnusedSlots == 0 ) {
571- CurBatch = reinterpret_cast <TransferBatchT *>(
580+ CurBatch = reinterpret_cast <BatchT *>(
572581 decompactPtr (SizeClassMap::BatchClassId, Array[I]));
573582 CurBatch->clear ();
574583 // Self-contained
@@ -596,7 +605,7 @@ template <typename Config> class SizeClassAllocator32 {
596605 // TB
597606 //
598607 // Each BlockGroup(BG) will associate with unique group id and the free blocks
599- // are managed by a list of TransferBatch (TB). To reduce the time of inserting
608+ // are managed by a list of Batch (TB). To reduce the time of inserting
600609 // blocks, BGs are sorted and the input `Array` are supposed to be sorted so
601610 // that we can get better performance of maintaining sorted property.
602611 // Use `SameGroup=true` to indicate that all blocks in the array are from the
@@ -613,29 +622,29 @@ template <typename Config> class SizeClassAllocator32 {
613622 BatchGroupT *BG = reinterpret_cast <BatchGroupT *>(
614623 SizeClassAllocator->getBatchClassBlock ());
615624 BG->Batches .clear ();
616- TransferBatchT *TB = reinterpret_cast <TransferBatchT *>(
617- SizeClassAllocator->getBatchClassBlock ());
625+ BatchT *TB =
626+ reinterpret_cast <BatchT *>( SizeClassAllocator->getBatchClassBlock ());
618627 TB->clear ();
619628
620629 BG->CompactPtrGroupBase = CompactPtrGroupBase;
621630 BG->Batches .push_front (TB);
622631 BG->BytesInBGAtLastCheckpoint = 0 ;
623- BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached ;
632+ BG->MaxCachedPerBatch = MaxNumBlocksInBatch ;
624633
625634 return BG;
626635 };
627636
628637 auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
629- SinglyLinkedList<TransferBatchT > &Batches = BG->Batches ;
630- TransferBatchT *CurBatch = Batches.front ();
638+ SinglyLinkedList<BatchT > &Batches = BG->Batches ;
639+ BatchT *CurBatch = Batches.front ();
631640 DCHECK_NE (CurBatch, nullptr );
632641
633642 for (u32 I = 0 ; I < Size;) {
634643 DCHECK_GE (BG->MaxCachedPerBatch , CurBatch->getCount ());
635644 u16 UnusedSlots =
636645 static_cast <u16 >(BG->MaxCachedPerBatch - CurBatch->getCount ());
637646 if (UnusedSlots == 0 ) {
638- CurBatch = reinterpret_cast <TransferBatchT *>(
647+ CurBatch = reinterpret_cast <BatchT *>(
639648 SizeClassAllocator->getBatchClassBlock ());
640649 CurBatch->clear ();
641650 Batches.push_front (CurBatch);
@@ -716,7 +725,7 @@ template <typename Config> class SizeClassAllocator32 {
716725 if (Sci->FreeListInfo .BlockList .empty ())
717726 return 0U ;
718727
719- SinglyLinkedList<TransferBatchT > &Batches =
728+ SinglyLinkedList<BatchT > &Batches =
720729 Sci->FreeListInfo .BlockList .front ()->Batches ;
721730
722731 if (Batches.empty ()) {
@@ -725,27 +734,27 @@ template <typename Config> class SizeClassAllocator32 {
725734 Sci->FreeListInfo .BlockList .pop_front ();
726735
727736 // Block used by `BatchGroup` is from BatchClassId. Turn the block into
728- // `TransferBatch ` with single block.
729- TransferBatchT *TB = reinterpret_cast <TransferBatchT *>(BG);
737+ // `Batch` with single block.
738+ BatchT *TB = reinterpret_cast <BatchT *>(BG);
730739 ToArray[0 ] =
731740 compactPtr (SizeClassMap::BatchClassId, reinterpret_cast <uptr>(TB));
732741 Sci->FreeListInfo .PoppedBlocks += 1 ;
733742 return 1U ;
734743 }
735744
736745 // So far, instead of always filling the blocks to `MaxBlockCount`, we only
737- // examine single `TransferBatch ` to minimize the time spent on the primary
738- // allocator. Besides, the sizes of `TransferBatch ` and
746+ // examine single `Batch` to minimize the time spent on the primary
747+ // allocator. Besides, the sizes of `Batch` and
739748 // `SizeClassAllocatorT::getMaxCached()` may also impact the time spent on
740749 // accessing the primary allocator.
741750 // TODO(chiahungduan): Evaluate if we want to always prepare `MaxBlockCount`
742- // blocks and/or adjust the size of `TransferBatch ` according to
751+ // blocks and/or adjust the size of `Batch` according to
743752 // `SizeClassAllocatorT::getMaxCached()`.
744- TransferBatchT *B = Batches.front ();
753+ BatchT *B = Batches.front ();
745754 DCHECK_NE (B, nullptr );
746755 DCHECK_GT (B->getCount (), 0U );
747756
748- // BachClassId should always take all blocks in the TransferBatch . Read the
757+ // BatchClassId should always take all blocks in the Batch. Read the
749758 // comment in `pushBatchClassBlocks()` for more details.
750759 const u16 PopCount = ClassId == SizeClassMap::BatchClassId
751760 ? B->getCount ()
@@ -756,7 +765,7 @@ template <typename Config> class SizeClassAllocator32 {
756765 // done without holding `Mutex`.
757766 if (B->empty ()) {
758767 Batches.pop_front ();
759- // `TransferBatch ` of BatchClassId is self-contained, no need to
768+ // `Batch` of BatchClassId is self-contained, no need to
760769 // deallocate. Read the comment in `pushBatchClassBlocks()` for more
761770 // details.
762771 if (ClassId != SizeClassMap::BatchClassId)
@@ -769,7 +778,7 @@ template <typename Config> class SizeClassAllocator32 {
769778 // We don't keep BatchGroup with zero blocks to avoid empty-checking
770779 // while allocating. Note that block used for constructing BatchGroup is
771780 // recorded as free blocks in the last element of BatchGroup::Batches.
772- // Which means, once we pop the last TransferBatch , the block is
781+ // Which means, once we pop the last Batch, the block is
773782 // implicitly deallocated.
774783 if (ClassId != SizeClassMap::BatchClassId)
775784 SizeClassAllocator->deallocate (SizeClassMap::BatchClassId, BG);
@@ -816,8 +825,7 @@ template <typename Config> class SizeClassAllocator32 {
816825 static_cast <u32 >((RegionSize - Offset) / Size));
817826 DCHECK_GT (NumberOfBlocks, 0U );
818827
819- constexpr u32 ShuffleArraySize =
820- MaxNumBatches * TransferBatchT::MaxNumCached;
828+ constexpr u32 ShuffleArraySize = MaxNumBatches * MaxNumBlocksInBatch;
821829 // Fill the transfer batches and put them in the size-class freelist. We
822830 // need to randomize the blocks for security purposes, so we first fill a
823831 // local array that we then shuffle before populating the batches.
@@ -1102,7 +1110,7 @@ template <typename Config> class SizeClassAllocator32 {
11021110 if (AllocatedGroupSize == 0 )
11031111 continue ;
11041112
1105- // TransferBatches are pushed in front of BG.Batches. The first one may
1113+ // Batches are pushed in front of BG.Batches. The first one may
11061114 // not have all caches used.
11071115 const uptr NumBlocks = (BG.Batches .size () - 1 ) * BG.MaxCachedPerBatch +
11081116 BG.Batches .front ()->getCount ();
0 commit comments