Lines Matching +full:batch +full:- +full:reduce

1 //===-- primary32.h ---------------------------------------------*- C++ -*-===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
26 // SizeClassAllocator32 is an allocator for 32- or 64-bit address spaces.
49 // The bytemap can only track UINT8_MAX - 1 classes.
50 static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
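The "- 1U" adjustments in the matched lines further down (e.g. source lines 288-289 and 902) suggest the PossibleRegions bytemap stores ClassId + 1 per region so that 0 can mean "unmapped", which is why at most UINT8_MAX - 1 classes fit in a byte. A minimal sketch of that assumed encoding, with hypothetical helper names:

  #include <cstdint>

  // Hypothetical helpers for a ClassId+1 bytemap encoding; the byte value 0
  // stays reserved for "no region of any class here".
  inline uint8_t encodeClassId(uint32_t ClassId) {
    return static_cast<uint8_t>(ClassId + 1U);
  }
  inline bool regionIsUsed(uint8_t MapByte) { return MapByte != 0; }
  inline uint32_t decodeClassId(uint8_t MapByte) {
    return static_cast<uint32_t>(MapByte) - 1U;
  }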
83 Sci->RandState = getRandomU32(&Seed);
84 // Sci->MaxRegionIndex is already initialized to 0.
85 Sci->MinRegionIndex = NumRegions;
86 Sci->ReleaseInfo.LastReleaseAtNs = Time;
99 unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
107 ScopedLock L(Sci->Mutex);
108 if (Sci->MinRegionIndex < MinRegionIndex)
109 MinRegionIndex = Sci->MinRegionIndex;
110 if (Sci->MaxRegionIndex > MaxRegionIndex)
111 MaxRegionIndex = Sci->MaxRegionIndex;
131 ScopedLock L1(Sci->Mutex);
133 for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
141 DCHECK_EQ(TotalBlocks, Sci->AllocatedUser / BlockSize);
142 DCHECK_EQ(Sci->FreeListInfo.PushedBlocks, Sci->FreeListInfo.PoppedBlocks);
146 ScopedLock L1(Sci->Mutex);
148 for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
161 Sci->AllocatedUser / BlockSize);
163 Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
176 const uptr Mask = (static_cast<uptr>(1) << GroupSizeLog) - 1;
198 ScopedLock L(Sci->Mutex);
211 // Push the array of free blocks to the designated batch group.
218 ScopedLock L(Sci->Mutex);
230 if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I]))
235 compactPtrGroupBase(Cur) < compactPtrGroupBase(Array[J - 1])) {
236 Array[J] = Array[J - 1];
237 --J;
242 ScopedLock L(Sci->Mutex);
248 for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
251 getSizeClassInfo(static_cast<uptr>(I))->Mutex.lock();
253 getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock();
261 getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
265 getSizeClassInfo(I)->Mutex.unlock();
276 Sci->Mutex.assertHeld();
277 if (Sci->MinRegionIndex < MinRegionIndex)
278 MinRegionIndex = Sci->MinRegionIndex;
279 if (Sci->MaxRegionIndex > MaxRegionIndex)
280 MaxRegionIndex = Sci->MaxRegionIndex;
288 (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
289 const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U);
305 ScopedLock L(Sci->Mutex);
306 TotalMapped += Sci->AllocatedUser;
307 PoppedBlocks += Sci->FreeListInfo.PoppedBlocks;
308 PushedBlocks += Sci->FreeListInfo.PushedBlocks;
310 Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
312 TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
315 ScopedLock L(Sci->Mutex);
321 Str->append(
327 ScopedLock L(Sci->Mutex);
354 ScopedLock L(Sci->Mutex);
364 ScopedLock L(Sci->Mutex);
438 unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
443 unmap(reinterpret_cast<void *>(End), MapEnd - End);
452 uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) REQUIRES(Sci->Mutex) {
458 Region = RegionsStash[--NumberOfStashedRegions];
463 // Sci->Mutex is held by the caller, so updating the Min/Max is safe.
465 if (RegionIndex < Sci->MinRegionIndex)
466 Sci->MinRegionIndex = RegionIndex;
467 if (RegionIndex > Sci->MaxRegionIndex)
468 Sci->MaxRegionIndex = RegionIndex;
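The two unmap() calls at source lines 438 and 443 trim the unused head and tail of an over-sized mapping, which is how a region aligned to RegionSize is obtained. A rough POSIX sketch of that over-map-then-trim idea (mapAlignedRegion is a hypothetical stand-in and assumes RegionSize is a power of two):

  #include <sys/mman.h>
  #include <cstddef>
  #include <cstdint>

  inline void *mapAlignedRegion(size_t RegionSize) {
    const size_t MapSize = RegionSize * 2; // enough slack to align the start
    void *MapBase = mmap(nullptr, MapSize, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (MapBase == MAP_FAILED)
      return nullptr;
    const uintptr_t Base = reinterpret_cast<uintptr_t>(MapBase);
    const uintptr_t Region = (Base + RegionSize - 1) & ~(RegionSize - 1);
    const uintptr_t End = Region + RegionSize;
    const uintptr_t MapEnd = Base + MapSize;
    if (Region != Base)
      munmap(MapBase, Region - Base);                      // leading slack
    if (End != MapEnd)
      munmap(reinterpret_cast<void *>(End), MapEnd - End); // trailing slack
    return reinterpret_cast<void *>(Region);
  }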
481 REQUIRES(Sci->Mutex) {
485 // size-classes. In addition, TransferBatch is allocated from BatchClassId.
487 // BatchClassId, they are self-contained, i.e., a TransferBatch records the
491 // +----------------------------+
493 // | +------+------+------+ |
495 // | +------+------+------+ |
496 // +----------------------------+
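A minimal sketch of the self-containment described in the comment above (toy types, not scudo's real TransferBatch): a batch allocated from BatchClassId records the address of its own block among the free blocks it tracks, so no extra metadata block is needed to free the batch itself later.

  #include <cstddef>
  #include <cstdint>

  struct ToyTransferBatch {
    static constexpr size_t MaxCached = 16; // hypothetical capacity
    uintptr_t Blocks[MaxCached];
    uint16_t Count = 0;
    void add(uintptr_t Addr) { Blocks[Count++] = Addr; }
  };

  // The first address a BatchClassId batch records is its own, making the
  // batch self-contained.
  inline void initSelfContained(ToyTransferBatch *TB) {
    TB->Count = 0;
    TB->add(reinterpret_cast<uintptr_t>(TB));
  }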
519 Sci->FreeListInfo.PushedBlocks += Size;
520 BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
525 decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
526 --Size;
527 BG->Batches.clear();
530 BG->CompactPtrGroupBase = 0;
531 BG->BytesInBGAtLastCheckpoint = 0;
532 BG->MaxCachedPerBatch =
535 Sci->FreeListInfo.BlockList.push_front(BG);
544 if (BG->Batches.empty()) {
547 decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
548 TB->clear();
551 TB->add(Array[Size - 1]);
552 TB->add(
554 --Size;
555 BG->Batches.push_front(TB);
558 TransferBatchT *CurBatch = BG->Batches.front();
563 static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
567 CurBatch->clear();
568 // Self-contained
569 CurBatch->add(Array[I]);
573 BG->Batches.push_front(CurBatch);
574 UnusedSlots = static_cast<u16>(BG->MaxCachedPerBatch - 1);
577 const u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
578 CurBatch->appendFromArray(&Array[I], AppendSize);
582 // Push the blocks to their batch group. The layout will be like,
584 // FreeListInfo.BlockList -> BG -> BG -> BG
593 // are managed by a list of TransferBatch(TB). To reduce the time of inserting
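A structural sketch of the layout named in the comments above (hypothetical types): the per-class free list is a singly linked list of batch groups kept sorted by group base, and each group owns a list of transfer batches holding compact pointers to that group's free blocks.

  #include <cstdint>
  #include <forward_list>
  #include <vector>

  struct ToyTransferBatch {
    std::vector<uint32_t> CompactPtrs; // free blocks of one group, compacted
  };

  struct ToyBatchGroup {
    uintptr_t CompactPtrGroupBase = 0;           // which memory group this is
    std::forward_list<ToyTransferBatch> Batches; // TB -> TB -> ...
  };

  // FreeListInfo.BlockList -> BG -> BG -> BG, sorted by CompactPtrGroupBase so
  // that a sorted input array can be merged with a single forward walk.
  using ToyBlockList = std::forward_list<ToyBatchGroup>;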
602 REQUIRES(Sci->Mutex) {
608 reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
609 BG->Batches.clear();
611 reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
612 TB->clear();
614 BG->CompactPtrGroupBase = CompactPtrGroupBase;
615 BG->Batches.push_front(TB);
616 BG->BytesInBGAtLastCheckpoint = 0;
617 BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached;
623 SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
628 DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
630 static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
633 reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
634 CurBatch->clear();
636 UnusedSlots = BG->MaxCachedPerBatch;
639 u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
640 CurBatch->appendFromArray(&Array[I], AppendSize);
645 Sci->FreeListInfo.PushedBlocks += Size;
646 BatchGroupT *Cur = Sci->FreeListInfo.BlockList.front();
653 compactPtrGroupBase(Array[0]) > Cur->CompactPtrGroupBase) {
655 Cur = Cur->Next;
659 compactPtrGroupBase(Array[0]) != Cur->CompactPtrGroupBase) {
662 Sci->FreeListInfo.BlockList.push_front(Cur);
664 Sci->FreeListInfo.BlockList.insert(Prev, Cur);
671 DCHECK_EQ(compactPtrGroupBase(Array[I]), Cur->CompactPtrGroupBase);
681 if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I])) {
682 DCHECK_EQ(compactPtrGroupBase(Array[I - 1]), Cur->CompactPtrGroupBase);
683 InsertBlocks(Cur, Array + I - Count, Count);
686 compactPtrGroupBase(Array[I]) > Cur->CompactPtrGroupBase) {
688 Cur = Cur->Next;
692 compactPtrGroupBase(Array[I]) != Cur->CompactPtrGroupBase) {
695 Sci->FreeListInfo.BlockList.insert(Prev, Cur);
704 InsertBlocks(Cur, Array + Size - Count, Count);
709 REQUIRES(Sci->Mutex) {
710 if (Sci->FreeListInfo.BlockList.empty())
714 Sci->FreeListInfo.BlockList.front()->Batches;
718 BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
719 Sci->FreeListInfo.BlockList.pop_front();
726 Sci->FreeListInfo.PoppedBlocks += 1;
740 DCHECK_GT(B->getCount(), 0U);
745 ? B->getCount()
746 : Min(MaxBlockCount, B->getCount());
747 B->moveNToArray(ToArray, PopCount);
751 if (B->empty()) {
753 // `TransferBatch` of BatchClassId is self-contained, no need to
757 C->deallocate(SizeClassMap::BatchClassId, B);
760 BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
761 Sci->FreeListInfo.BlockList.pop_front();
763 // We don't keep BatchGroup with zero blocks to avoid empty-checking
769 C->deallocate(SizeClassMap::BatchClassId, BG);
773 Sci->FreeListInfo.PoppedBlocks += PopCount;
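The pop path visible above takes blocks from the front batch of the front group and retires batches and groups once they are empty. A self-contained toy model of that flow (the MaxBlockCount limit is inferred from the matched lines; container choices are illustrative):

  #include <algorithm>
  #include <cstddef>
  #include <cstdint>
  #include <deque>
  #include <vector>

  struct ToyBatch { std::vector<uintptr_t> Blocks; };
  struct ToyGroup { std::deque<ToyBatch> Batches; };

  inline size_t popBlocks(std::deque<ToyGroup> &BlockList, uintptr_t *ToArray,
                          size_t MaxBlockCount) {
    if (BlockList.empty())
      return 0;
    ToyGroup &BG = BlockList.front();
    ToyBatch &B = BG.Batches.front();
    const size_t PopCount = std::min(MaxBlockCount, B.Blocks.size());
    std::copy_n(B.Blocks.end() - PopCount, PopCount, ToArray);
    B.Blocks.resize(B.Blocks.size() - PopCount);
    if (B.Blocks.empty())
      BG.Batches.pop_front();  // retire the now-empty transfer batch
    if (BG.Batches.empty())
      BlockList.pop_front();   // don't keep a group with zero blocks
    return PopCount;
  }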
778 REQUIRES(Sci->Mutex) {
781 // If the size-class currently has a region associated with it, use it. The
785 if (Sci->CurrentRegion) {
786 Region = Sci->CurrentRegion;
787 DCHECK_GT(Sci->CurrentRegionAllocated, 0U);
788 Offset = Sci->CurrentRegionAllocated;
790 DCHECK_EQ(Sci->CurrentRegionAllocated, 0U);
794 C->getStats().add(StatMapped, RegionSize);
795 Sci->CurrentRegion = Region;
809 static_cast<u32>((RegionSize - Offset) / Size));
814 // Fill the transfer batches and put them in the size-class freelist. We
829 shuffle(ShuffleArray + I - N, N, &Sci->RandState);
830 pushBlocksImpl(C, ClassId, Sci, ShuffleArray + I - N, N,
839 shuffle(ShuffleArray + NumberOfBlocks - N, N, &Sci->RandState);
840 pushBlocksImpl(C, ClassId, Sci, &ShuffleArray[NumberOfBlocks - N], N,
850 Sci->FreeListInfo.PushedBlocks -= NumberOfBlocks;
853 C->getStats().add(StatFree, AllocatedUser);
854 DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize);
858 if (RegionSize - (Sci->CurrentRegionAllocated + AllocatedUser) < Size) {
859 Sci->CurrentRegion = 0;
860 Sci->CurrentRegionAllocated = 0;
862 Sci->CurrentRegionAllocated += AllocatedUser;
864 Sci->AllocatedUser += AllocatedUser;
870 REQUIRES(Sci->Mutex) {
871 if (Sci->AllocatedUser == 0)
875 Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
876 const uptr BytesInFreeList = Sci->AllocatedUser - InUse * BlockSize;
878 if (BytesInFreeList >= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
880 BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
882 const uptr AvailableChunks = Sci->AllocatedUser / BlockSize;
883 Str->append(
887 ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
888 Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks, InUse,
889 AvailableChunks, Sci->ReleaseInfo.NumReleasesAttempted,
890 Sci->ReleaseInfo.LastReleasedBytes >> 10, PushedBytesDelta >> 10);
894 ScopedString *Str) REQUIRES(Sci->Mutex) {
896 const uptr First = Sci->MinRegionIndex;
897 const uptr Last = Sci->MaxRegionIndex;
899 const uptr NumberOfRegions = Last - First + 1U;
902 return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
906 if (!Sci->FreeListInfo.BlockList.empty()) {
914 const uptr TotalBlocks = Sci->AllocatedUser / BlockSize;
916 Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
930 AllocatedPagesCount - Recorder.getReleasedPagesCount();
937 Str->append(" %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
945 REQUIRES(Sci->Mutex) {
948 DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
950 Sci->AllocatedUser -
951 (Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks) *
967 const uptr First = Sci->MinRegionIndex;
968 const uptr Last = Sci->MaxRegionIndex;
973 const uptr NumberOfRegions = Last - First + 1U;
977 ++Sci->ReleaseInfo.NumReleasesAttempted;
980 // 2. Mark the free blocks so that we can tell which pages are in-use by
994 return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
999 Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
1000 Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
1001 TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
1003 Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
1010 REQUIRES(Sci->Mutex) {
1011 DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
1014 if (BytesInFreeList <= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint)
1015 Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
1023 // |--------------------------------------->
1032 // (BytesInFreeListAtLastCheckpoint - BytesInFreeList).
1034 BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
1042 if (PushedBytesDelta < Sci->AllocatedUser / 16U)
1057 if (Sci->ReleaseInfo.LastReleaseAtNs +
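A paraphrase of the release gating visible in the matched lines above; the PushedBytesDelta < AllocatedUser / 16 threshold comes from source line 1042, while the names and the interval handling here are simplifying assumptions:

  #include <cstdint>

  // Hypothetical helper: decide whether a page-release attempt is worthwhile.
  inline bool mayTryRelease(uint64_t BytesInFreeList,
                            uint64_t BytesAtLastCheckpoint,
                            uint64_t AllocatedUser, uint64_t LastReleaseNs,
                            uint64_t NowNs, uint64_t IntervalNs) {
    if (BytesInFreeList <= BytesAtLastCheckpoint)
      return false; // nothing new was pushed back since the last checkpoint
    const uint64_t PushedBytesDelta = BytesInFreeList - BytesAtLastCheckpoint;
    if (PushedBytesDelta < AllocatedUser / 16U)
      return false; // less than ~6% of this class's memory became free
    // Otherwise, only proceed if the minimum release interval has elapsed.
    return LastReleaseNs + IntervalNs <= NowNs;
  }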
1073 REQUIRES(Sci->Mutex) {
1077 compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));
1085 for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
1091 ? Sci->CurrentRegionAllocated
1098 const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
1099 BG.Batches.front()->getCount();
1108 const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
1116 (100U - 1U - BlockSize / 16U)) {
1126 const uptr RegionIndex = (GroupBase - Base) / RegionSize;