Lines Matching +full:batch +full:- +full:reduce
1 //===-- primary64.h ---------------------------------------------*- C++ -*-===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
28 // SizeClassAllocator64 is an allocator tuned for 64-bit address space.
58 static const uptr GroupScale = GroupSizeLog - CompactPtrScale;
95 // `&ReleaseToOsIntervalMs` which slightly reduces the frequency of these
99 // constraint is determined by the size of in-use blocks in the minimal size
102 // +- one memory group -+
103 // +----------------------+------+
105 // +----------------------+------+
107 // 3% in-use blocks
116 // use the size of its in-use blocks as a heuristic.
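The matched comments above describe the per-memory-group release heuristic: a group is only worth scanning for releasable pages when the bytes still in use inside it are a small fraction of the group, as in the "3% in-use blocks" picture. A minimal standalone sketch of that idea (the function name, group size, and percentage cut-off are illustrative, not the allocator's actual API):

    #include <cstdint>
    #include <cstdio>

    // Sketch: decide whether a memory group is a good release candidate.
    // A group whose in-use bytes are only a few percent of its capacity is
    // mostly free, so scanning it for releasable pages is likely to pay off.
    static bool worthScanningForRelease(std::uint64_t InUseBytes,
                                        std::uint64_t GroupSize,
                                        std::uint64_t MaxInUsePercent) {
      return InUseBytes * 100 <= GroupSize * MaxInUsePercent;
    }

    int main() {
      const std::uint64_t GroupSize = 256u * 1024;          // hypothetical group size
      const std::uint64_t InUseBytes = GroupSize * 3 / 100; // the "3% in-use" case
      std::printf("scan group: %s\n",
                  worthScanningForRelease(InUseBytes, GroupSize,
                                          /*MaxInUsePercent=*/5)
                      ? "yes"
                      : "no");
      return 0;
    }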
126 getRegionInfo(I)->RandState = getRandomU32(&Seed);
148 getRegionInfo(I)->FLLockCV.bindTestOnly(getRegionInfo(I)->FLLock);
160 ScopedLock ML(Region->MMLock);
161 MemMapT MemMap = Region->MemMapInfo.MemMap;
178 ScopedLock ML(Region->MMLock);
179 ScopedLock FL(Region->FLLock);
182 for (BatchGroupT &BG : Region->FreeListInfo.BlockList) {
189 DCHECK_EQ(TotalBlocks, Region->MemMapInfo.AllocatedUser / BlockSize);
190 DCHECK_EQ(Region->FreeListInfo.PushedBlocks,
191 Region->FreeListInfo.PoppedBlocks);
195 ScopedLock ML(Region->MMLock);
196 ScopedLock FL(Region->FLLock);
199 for (BatchGroupT &BG : Region->FreeListInfo.BlockList) {
210 Region->MemMapInfo.AllocatedUser / BlockSize);
211 DCHECK_GE(Region->FreeListInfo.PoppedBlocks,
212 Region->FreeListInfo.PushedBlocks);
214 Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
225 ScopedLock L(Region->FLLock);
238 // When two threads compete for `Region->MMLock`, we only want one of
241 ScopedLock ML(Region->MMLock);
243 ScopedLock FL(Region->FLLock);
249 const bool RegionIsExhausted = Region->Exhausted;
254 ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
272 // Push the array of free blocks to the designated batch group.
279 ScopedLock L(Region->FLLock);
282 Region->FLLockCV.notifyAll(Region->FLLock);
294 if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I]))
298 while (J > 0 && compactPtrGroup(Cur) < compactPtrGroup(Array[J - 1])) {
299 Array[J] = Array[J - 1];
300 --J;
307 ScopedLock L(Region->FLLock);
310 Region->FLLockCV.notifyAll(Region->FLLock);
316 for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
319 getRegionInfo(static_cast<uptr>(I))->MMLock.lock();
320 getRegionInfo(static_cast<uptr>(I))->FLLock.lock();
322 getRegionInfo(SizeClassMap::BatchClassId)->MMLock.lock();
323 getRegionInfo(SizeClassMap::BatchClassId)->FLLock.lock();
327 getRegionInfo(SizeClassMap::BatchClassId)->FLLock.unlock();
328 getRegionInfo(SizeClassMap::BatchClassId)->MMLock.unlock();
332 getRegionInfo(I)->FLLock.unlock();
333 getRegionInfo(I)->MMLock.unlock();
345 Region->FLLock.assertHeld();
346 Region->MMLock.assertHeld();
348 const uptr From = Region->RegionBeg;
349 const uptr To = From + Region->MemMapInfo.AllocatedUser;
363 ScopedLock L(Region->MMLock);
364 TotalMapped += Region->MemMapInfo.MappedUser;
367 ScopedLock L(Region->FLLock);
368 PoppedBlocks += Region->FreeListInfo.PoppedBlocks;
369 PushedBlocks += Region->FreeListInfo.PushedBlocks;
373 Str->append("Stats: SizeClassAllocator64: %zuM mapped (%uM rss) in %zu "
376 PoppedBlocks - PushedBlocks, IntervalMs >= 0 ? IntervalMs : -1);
380 ScopedLock L1(Region->MMLock);
381 ScopedLock L2(Region->FLLock);
387 Str->append(
393 ScopedLock L(Region->MMLock);
399 Str->append(
405 ScopedLock L(Region->MMLock);
427 if (Region->MMLock.tryLock()) {
429 Region->MMLock.unlock();
441 ScopedLock L(Region->MMLock);
454 return getRegionInfo(ClassId)->RegionBeg;
474 uptr MinDistance = -1UL;
486 if (Begin > End || End - Begin < SizeClassMap::getSizeByClassId(I))
493 RegionDistance = Ptr - End;
495 RegionDistance = Begin - Ptr;
511 B.RegionBegin + uptr(sptr(Ptr - B.RegionBegin) / sptr(B.BlockSize) *
516 B.BlockBegin -= B.BlockSize;
573 char Padding[SCUDO_CACHE_LINE_SIZE -
585 Region->MMLock.assertHeld();
588 !Region->MemMapInfo.MemMap.isAllocated()) {
591 return Region->MemMapInfo.MemMap.getBase();
595 return static_cast<CompactPtrT>((Ptr - Base) >> CompactPtrScale);
603 const uptr Mask = (static_cast<uptr>(1) << GroupScale) - 1;
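The fragments around `GroupScale`, the compaction shift, and the group mask imply a compact-pointer encoding: block addresses are stored right-shifted by `CompactPtrScale` relative to a base, and a block's group is its compact pointer with the low `GroupScale` bits cleared. A hedged, self-contained illustration of that arithmetic with made-up constants (the real scale and group-size values are configuration dependent):

    #include <cstdint>
    #include <cstdio>

    using uptr = std::uintptr_t;

    // Assumed, illustrative constants: 16-byte-aligned blocks, 256 KiB groups.
    constexpr uptr CompactPtrScale = 4;
    constexpr uptr GroupSizeLog = 18;
    constexpr uptr GroupScale = GroupSizeLog - CompactPtrScale;

    // A compact pointer is the offset from a base, right-shifted by the scale.
    static std::uint32_t compactPtr(uptr Base, uptr Ptr) {
      return static_cast<std::uint32_t>((Ptr - Base) >> CompactPtrScale);
    }
    static uptr decompactPtr(uptr Base, std::uint32_t C) {
      return Base + (static_cast<uptr>(C) << CompactPtrScale);
    }
    // The group id is the compact pointer with the low GroupScale bits cleared.
    static uptr compactPtrGroup(std::uint32_t C) {
      const uptr Mask = (static_cast<uptr>(1) << GroupScale) - 1;
      return static_cast<uptr>(C) & ~Mask;
    }

    int main() {
      const uptr Base = 0x1000000;
      const uptr Ptr = Base + (5u << GroupSizeLog) + 0x40; // a block in group #5
      const std::uint32_t C = compactPtr(Base, Ptr);
      std::printf("compact=0x%x group=0x%zx roundtrip=%d\n",
                  static_cast<unsigned>(C),
                  static_cast<size_t>(compactPtrGroup(C)),
                  decompactPtr(Base, C) == Ptr);
      return 0;
    }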
622 REQUIRES(Region->MMLock) {
623 DCHECK(!Region->MemMapInfo.MemMap.isAllocated());
628 Region->MemMapInfo.MemMap = MemMap;
630 Region->RegionBeg = MemMap.getBase();
632 Region->RegionBeg +=
633 (getRandomModN(&Region->RandState, 16) + 1) * PageSize;
640 Region->ReleaseInfo.TryReleaseThreshold =
643 Region->ReleaseInfo.TryReleaseThreshold =
649 REQUIRES(Region->FLLock) {
653 // size-classes. In addition, TransferBatch is allocated from BatchClassId.
655 // BatchClassId, they are self-contained. I.e., a TransferBatch records the
659 // +----------------------------+
661 // | +------+------+------+ |
663 // | +------+------+------+ |
664 // +----------------------------+
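The comment block and figure above explain why the batch size class needs no extra bookkeeping blocks: a TransferBatch serving BatchClassId records the compact pointer of the very block it occupies. A tiny sketch of that "self-contained" property using hypothetical types (nothing here is the allocator's real TransferBatch):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical mini TransferBatch: a bounded array of compact pointers.
    struct MiniTransferBatch {
      static constexpr unsigned MaxCached = 8;
      std::uint32_t Blocks[MaxCached];
      unsigned Count = 0;
      void add(std::uint32_t C) { Blocks[Count++] = C; }
    };

    int main() {
      // Pretend this batch lives inside the block whose compact pointer is 0x42.
      const std::uint32_t SelfCompactPtr = 0x42;
      MiniTransferBatch TB;
      TB.add(SelfCompactPtr); // records the block that holds the batch itself
      TB.add(0x43);           // plus other free BatchClass blocks
      std::printf("batch tracks %u blocks, first one is itself: %d\n", TB.Count,
                  TB.Blocks[0] == SelfCompactPtr);
      return 0;
    }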
687 Region->FreeListInfo.PushedBlocks += Size;
688 BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
693 decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
694 --Size;
695 BG->Batches.clear();
698 BG->CompactPtrGroupBase = 0;
699 BG->BytesInBGAtLastCheckpoint = 0;
700 BG->MaxCachedPerBatch =
703 Region->FreeListInfo.BlockList.push_front(BG);
712 if (BG->Batches.empty()) {
715 decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
716 TB->clear();
719 TB->add(Array[Size - 1]);
720 TB->add(
722 --Size;
723 BG->Batches.push_front(TB);
726 TransferBatchT *CurBatch = BG->Batches.front();
731 static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
735 CurBatch->clear();
736 // Self-contained
737 CurBatch->add(Array[I]);
741 BG->Batches.push_front(CurBatch);
742 UnusedSlots = static_cast<u16>(BG->MaxCachedPerBatch - 1);
745 const u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
746 CurBatch->appendFromArray(&Array[I], AppendSize);
751 // Push the blocks to their batch group. The layout will be like,
753 // FreeListInfo.BlockList -> BG -> BG -> BG
762 // are managed by a list of TransferBatch (TB). To reduce the time of inserting
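The layout comment above is the primary's core data structure: FreeListInfo.BlockList is a list of BatchGroups, one per memory group and kept sorted by group base, and each BatchGroup owns a list of TransferBatches holding the free blocks. A sketch of that shape using standard containers instead of the allocator's intrusive lists (types and values are illustrative only):

    #include <cstdint>
    #include <cstdio>
    #include <list>
    #include <vector>

    // TB: a TransferBatch, i.e. a bounded batch of free blocks (compact ptrs).
    struct TB {
      std::vector<std::uint32_t> Blocks;
    };
    // BG: a BatchGroup, i.e. all the free blocks of one memory group.
    struct BG {
      std::uintptr_t GroupBase;
      std::list<TB> Batches;
    };

    int main() {
      std::list<BG> BlockList; // plays the role of FreeListInfo.BlockList
      BlockList.push_back({0x0000, {}});
      BlockList.push_back({0x40000, {}}); // kept sorted by group base
      BlockList.front().Batches.push_front({{1, 2, 3}});
      std::printf("groups=%zu, blocks in the first group's front batch=%zu\n",
                  BlockList.size(),
                  BlockList.front().Batches.front().Blocks.size());
      return 0;
    }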
769 REQUIRES(Region->FLLock) {
775 reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
776 BG->Batches.clear();
778 reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
779 TB->clear();
781 BG->CompactPtrGroupBase = CompactPtrGroupBase;
782 BG->Batches.push_front(TB);
783 BG->BytesInBGAtLastCheckpoint = 0;
784 BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached;
790 SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
795 DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
797 static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
800 reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
801 CurBatch->clear();
803 UnusedSlots = BG->MaxCachedPerBatch;
806 u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
807 CurBatch->appendFromArray(&Array[I], AppendSize);
812 Region->FreeListInfo.PushedBlocks += Size;
813 BatchGroupT *Cur = Region->FreeListInfo.BlockList.front();
820 compactPtrGroup(Array[0]) > Cur->CompactPtrGroupBase) {
822 Cur = Cur->Next;
826 compactPtrGroup(Array[0]) != Cur->CompactPtrGroupBase) {
829 Region->FreeListInfo.BlockList.push_front(Cur);
831 Region->FreeListInfo.BlockList.insert(Prev, Cur);
838 DCHECK_EQ(compactPtrGroup(Array[I]), Cur->CompactPtrGroupBase);
848 if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I])) {
849 DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->CompactPtrGroupBase);
850 InsertBlocks(Cur, Array + I - Count, Count);
853 compactPtrGroup(Array[I]) > Cur->CompactPtrGroupBase) {
855 Cur = Cur->Next;
859 compactPtrGroup(Array[I]) != Cur->CompactPtrGroupBase) {
862 Region->FreeListInfo.BlockList.insert(Prev, Cur);
871 InsertBlocks(Cur, Array + Size - Count, Count);
886 ScopedLock FL(Region->FLLock);
887 if (!Region->isPopulatingFreeList) {
888 Region->isPopulatingFreeList = true;
894 ScopedLock ML(Region->MMLock);
896 const bool RegionIsExhausted = Region->Exhausted;
901 ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
907 // `Region->isPopulatingFreeList` to false so the threads about to
909 ScopedLock FL(Region->FLLock);
910 Region->isPopulatingFreeList = false;
911 Region->FLLockCV.notifyAll(Region->FLLock);
919 // 2. Region->isPopulatingFreeList == true, i.e., someone is still doing
923 // Region->isPopulatingFreeList == false because all the new populated
926 ScopedLock FL(Region->FLLock);
931 if (!Region->isPopulatingFreeList)
939 Region->FLLockCV.wait(Region->FLLock);
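The fragments above show the populate-under-contention protocol: one thread sets `isPopulatingFreeList`, drops the free-list lock while it maps and carves new blocks, then clears the flag and signals `FLLockCV`; other threads wait on the condition variable instead of racing to map more memory. A minimal sketch of the same pattern, with std::mutex/std::condition_variable standing in for FLLock/FLLockCV:

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    // Stand-ins for the region's FLLock, FLLockCV, and populating flag.
    static std::mutex FLLock;
    static std::condition_variable FLLockCV;
    static bool IsPopulatingFreeList = false;
    static int FreeBlocks = 0;

    static void populate() {
      // ... map memory and carve new blocks outside the free-list lock ...
      std::lock_guard<std::mutex> L(FLLock);
      FreeBlocks = 128;
      IsPopulatingFreeList = false;
      FLLockCV.notify_all(); // wake the threads waiting for blocks
    }

    static void waitForBlocks() {
      std::unique_lock<std::mutex> L(FLLock);
      FLLockCV.wait(L, [] { return !IsPopulatingFreeList; });
      std::printf("blocks available: %d\n", FreeBlocks);
    }

    int main() {
      IsPopulatingFreeList = true;         // this thread claimed the populate work
      std::thread Consumer(waitForBlocks); // would otherwise race to map memory
      populate();
      Consumer.join();
      return 0;
    }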
951 REQUIRES(Region->FLLock) {
952 if (Region->FreeListInfo.BlockList.empty())
956 Region->FreeListInfo.BlockList.front()->Batches;
960 BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
961 Region->FreeListInfo.BlockList.pop_front();
968 Region->FreeListInfo.PoppedBlocks += 1;
982 DCHECK_GT(B->getCount(), 0U);
987 ? B->getCount()
988 : Min(MaxBlockCount, B->getCount());
989 B->moveNToArray(ToArray, PopCount);
993 if (B->empty()) {
995 // `TransferBatch` of BatchClassId is self-contained, no need to
999 C->deallocate(SizeClassMap::BatchClassId, B);
1002 BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
1003 Region->FreeListInfo.BlockList.pop_front();
1005 // We don't keep BatchGroup with zero blocks to avoid empty-checking
1011 C->deallocate(SizeClassMap::BatchClassId, BG);
1015 Region->FreeListInfo.PoppedBlocks += PopCount;
1024 REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
1026 !Region->MemMapInfo.MemMap.isAllocated()) {
1041 DCHECK(Region->MemMapInfo.MemMap.isAllocated());
1044 const uptr RegionBeg = Region->RegionBeg;
1045 const uptr MappedUser = Region->MemMapInfo.MappedUser;
1047 Region->MemMapInfo.AllocatedUser + MaxCount * Size;
1052 roundUp(TotalUserBytes - MappedUser, MapSizeIncrement);
1053 const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
1055 Region->Exhausted = true;
1059 if (UNLIKELY(!Region->MemMapInfo.MemMap.remap(
1066 Region->MemMapInfo.MappedUser += MapSize;
1067 C->getStats().add(StatMapped, MapSize);
1072 static_cast<u32>((Region->MemMapInfo.MappedUser -
1073 Region->MemMapInfo.AllocatedUser) /
1083 uptr P = RegionBeg + Region->MemMapInfo.AllocatedUser;
1087 ScopedLock L(Region->FLLock);
1094 shuffle(ShuffleArray + I - N, N, &Region->RandState);
1095 pushBlocksImpl(C, ClassId, Region, ShuffleArray + I - N, N,
1104 shuffle(ShuffleArray + NumberOfBlocks - N, N, &Region->RandState);
1105 pushBlocksImpl(C, ClassId, Region, &ShuffleArray[NumberOfBlocks - N], N,
1119 Region->FreeListInfo.PushedBlocks -= NumberOfBlocks;
1122 C->getStats().add(StatFree, AllocatedUser);
1123 Region->MemMapInfo.AllocatedUser += AllocatedUser;
1129 REQUIRES(Region->MMLock, Region->FLLock) {
1130 if (Region->MemMapInfo.MappedUser == 0)
1134 Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
1136 Region->MemMapInfo.AllocatedUser - InUseBlocks * BlockSize;
1139 Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
1141 BytesInFreeList - Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
1143 const uptr TotalChunks = Region->MemMapInfo.AllocatedUser / BlockSize;
1144 Str->append("%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
1148 Region->Exhausted ? "E" : " ", ClassId,
1149 getSizeByClassId(ClassId), Region->MemMapInfo.MappedUser >> 10,
1150 Region->FreeListInfo.PoppedBlocks,
1151 Region->FreeListInfo.PushedBlocks, InUseBlocks, TotalChunks,
1152 Region->ReleaseInfo.NumReleasesAttempted,
1153 Region->ReleaseInfo.LastReleasedBytes >> 10,
1154 RegionPushedBytesDelta >> 10, Region->RegionBeg,
1159 ScopedString *Str) REQUIRES(Region->MMLock) {
1162 Region->MemMapInfo.AllocatedUser + Region->RegionBeg;
1166 ScopedLock L(Region->FLLock);
1167 GroupsToRelease = Region->FreeListInfo.BlockList;
1168 Region->FreeListInfo.BlockList.clear();
1182 ScopedLock L(Region->FLLock);
1184 const uptr TotalBlocks = Region->MemMapInfo.AllocatedUser / BlockSize;
1186 Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
1188 roundUp(Region->MemMapInfo.AllocatedUser, PageSize) / PageSize;
1191 AllocatedPagesCount - Recorder.getReleasedPagesCount();
1198 Str->append(" %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
1206 REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
1209 Region->MemMapInfo.AllocatedUser + Region->RegionBeg;
1213 ScopedLock L(Region->FLLock);
1214 GroupsToRelease = Region->FreeListInfo.BlockList;
1215 Region->FreeListInfo.BlockList.clear();
1232 Str->append("MemoryGroupFragmentationInfo in Region %zu (%zu)\n", ClassId,
1236 roundUp(Region->MemMapInfo.AllocatedUser, GroupSize) / GroupSize;
1240 computePercentage(Recorder.NumPagesInOneGroup -
1243 Str->append("MemoryGroup #%zu (0x%zx): util: %3zu.%02zu%%\n", I,
1244 Region->RegionBeg + I * GroupSize, Integral, Fractional);
1250 REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
1254 Region->MemMapInfo.AllocatedUser + Region->RegionBeg;
1259 ScopedLock L(Region->FLLock);
1261 BytesInFreeList = Region->MemMapInfo.AllocatedUser -
1262 (Region->FreeListInfo.PoppedBlocks -
1263 Region->FreeListInfo.PushedBlocks) *
1282 BytesInFreeList - Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
1289 GroupsToRelease = Region->FreeListInfo.BlockList;
1290 Region->FreeListInfo.BlockList.clear();
1302 ++Region->ReleaseInfo.NumReleasesAttempted;
1311 // Then we can tell which pages are in-use by querying
1325 RegionReleaseRecorder<MemMapT> Recorder(&Region->MemMapInfo.MemMap,
1326 Region->RegionBeg,
1339 if (RegionPushedBytesDelta < Region->ReleaseInfo.TryReleaseThreshold &&
1341 Region->ReleaseInfo.LastReleasedBytes +
1343 Region->ReleaseInfo.TryReleaseThreshold =
1344 Max(Region->ReleaseInfo.TryReleaseThreshold / 2,
1349 Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
1350 Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
1352 Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
1354 if (Region->ReleaseInfo.PendingPushedBytesDelta > 0) {
1359 Region->ReleaseInfo.TryReleaseThreshold +=
1360 Region->ReleaseInfo.PendingPushedBytesDelta / 2;
1364 Region->ReleaseInfo.TryReleaseThreshold = Min<uptr>(
1365 Region->ReleaseInfo.TryReleaseThreshold, (1UL << GroupSizeLog) / 2);
1366 Region->ReleaseInfo.PendingPushedBytesDelta = 0;
1379 REQUIRES(Region->MMLock, Region->FLLock) {
1380 DCHECK_GE(Region->FreeListInfo.PoppedBlocks,
1381 Region->FreeListInfo.PushedBlocks);
1388 // |--------------------------------------->
1397 // (BytesInFreeListAtLastCheckpoint - BytesInFreeList).
1399 Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
1400 Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
1404 BytesInFreeList - Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
1407 if (RegionPushedBytesDelta < Region->ReleaseInfo.TryReleaseThreshold / 2)
1417 CurTimeNs - Region->ReleaseInfo.LastReleaseAtNs;
1423 if (RegionPushedBytesDelta < Region->ReleaseInfo.TryReleaseThreshold) {
1439 Region->ReleaseInfo.PendingPushedBytesDelta = RegionPushedBytesDelta;
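The release decision sketched above is checkpoint-based: the region remembers how many bytes sat in the free list at the last release, lowers that checkpoint whenever the free list shrinks, and only attempts another release once the bytes pushed since the checkpoint exceed `TryReleaseThreshold`. A simplified, self-contained version of that test (the state layout and threshold value are invented for the example):

    #include <cstdint>
    #include <cstdio>

    // Invented per-region release state for the example.
    struct ReleaseState {
      std::uint64_t BytesInFreeListAtLastCheckpoint = 0;
      std::uint64_t TryReleaseThreshold = 64 * 1024;
    };

    static bool hasChanceToRelease(ReleaseState &S, std::uint64_t BytesInFreeList) {
      // If the free list shrank (blocks were popped), move the checkpoint down
      // so the delta only counts bytes pushed since then.
      if (BytesInFreeList < S.BytesInFreeListAtLastCheckpoint)
        S.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
      const std::uint64_t PushedBytesDelta =
          BytesInFreeList - S.BytesInFreeListAtLastCheckpoint;
      return PushedBytesDelta >= S.TryReleaseThreshold;
    }

    int main() {
      ReleaseState S;
      std::printf("after 16K pushed: %d\n", hasChanceToRelease(S, 16 * 1024));
      std::printf("after 80K pushed: %d\n", hasChanceToRelease(S, 80 * 1024));
      return 0;
    }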
1452 REQUIRES(Region->MMLock, Region->FLLock) {
1464 for (BatchGroupT *BG = Region->FreeListInfo.BlockList.front(),
1467 // Group boundary is always GroupSize-aligned from CompactPtr base. The
1474 // +-----------------------+-----------------------+
1476 // --- GroupSize --- --- GroupSize ---
1481 decompactGroupBase(CompactPtrBase, BG->CompactPtrGroupBase);
1482 DCHECK_LE(Region->RegionBeg, BatchGroupBase);
1484 DCHECK_EQ((Region->RegionBeg - BatchGroupBase) % GroupSize, 0U);
1487 const uptr NumBlocks = (BG->Batches.size() - 1) * BG->MaxCachedPerBatch +
1488 BG->Batches.front()->getCount();
1491 if (BytesInBG <= BG->BytesInBGAtLastCheckpoint) {
1492 BG->BytesInBGAtLastCheckpoint = BytesInBG;
1494 BG = BG->Next;
1498 const uptr PushedBytesDelta = BytesInBG - BG->BytesInBGAtLastCheckpoint;
1501 BG = BG->Next;
1513 : AllocatedUserEnd - BatchGroupBase;
1515 (AllocatedGroupSize * (100 - 1U - BlockSize / 16U)) / 100U;
1532 // +---+---------------------------+-----+
1534 // +---+---------------------------+-----+
1539 ReleaseThreshold - BytesInBG + PushedBytesDelta);
1549 BG = BG->Next;
1558 // (BG) (BG->Next)
1562 // nil +--+ +--+
1563 // |X | -> | | -> ...
1564 // +--+ +--+
1569 // (BG) (BG->Next)
1573 // +--+ +--+ +--+
1574 // | | -> |X | -> | | -> ...
1575 // +--+ +--+ +--+
1582 // +--+ +--+ +--+
1583 // | |-+ |X | +->| | -> ...
1584 // +--+ | +--+ | +--+
1585 // +--------+
1591 BG = BG->Next;
1598 Cur->BytesInBGAtLastCheckpoint = BytesInBG;
1601 Region->FreeListInfo.BlockList.extract(Prev, Cur);
1603 Region->FreeListInfo.BlockList.pop_front();
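The three-case figure above is ordinary singly-linked-list surgery: extracting a BatchGroup needs its predecessor, and the head is the pop_front special case. A minimal sketch on a toy node type (not the allocator's SinglyLinkedList):

    #include <cstdio>

    struct Node {
      unsigned GroupBase;
      Node *Next = nullptr;
    };

    // Unlink Cur from the list rooted at Head. Prev is nullptr when Cur is the head.
    static void extract(Node *&Head, Node *Prev, Node *Cur) {
      if (Prev == nullptr)
        Head = Cur->Next;       // the pop_front case
      else
        Prev->Next = Cur->Next; // the middle/tail case
      Cur->Next = nullptr;
    }

    int main() {
      Node A{0x0000}, B{0x40000}, C{0x80000};
      A.Next = &B;
      B.Next = &C;
      Node *Head = &A;
      extract(Head, /*Prev=*/&A, /*Cur=*/&B); // remove the middle node
      for (Node *N = Head; N; N = N->Next)
        std::printf("0x%x ", N->GroupBase);
      std::printf("\n"); // prints "0x0 0x80000"
      return 0;
    }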
1614 Region->ReleaseInfo.TryReleaseThreshold = MinDistToThreshold;
1624 REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
1631 CompactPtrBase, GroupsToRelease.front()->CompactPtrGroupBase);
1634 GroupsToRelease.back()->CompactPtrGroupBase) +
1640 roundUpSlow(LastGroupEnd - Region->RegionBeg, BlockSize) +
1641 Region->RegionBeg;
1642 const uptr ReleaseRangeSize = ReleaseEnd - ReleaseBase;
1643 const uptr ReleaseOffset = ReleaseBase - Region->RegionBeg;
1658 : AllocatedUserEnd - BatchGroupBase;
1663 (BatchGroupUsedEnd - Region->RegionBeg) % BlockSize == 0;
1669 const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
1670 BG.Batches.front()->getCount();
1681 Region->RegionBeg, /*RegionIndex=*/0,
1682 Region->MemMapInfo.AllocatedUser);
1689 BG.Batches, DecompactPtr, Region->RegionBeg, /*RegionIndex=*/0,
1690 Region->MemMapInfo.AllocatedUser, MayContainLastBlockInRegion);
1701 REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
1702 ScopedLock L(Region->FLLock);
1719 // that both `Region->FreeListInfo.BlockList` and `GroupsToRelease` are
1721 for (BatchGroupT *BG = Region->FreeListInfo.BlockList.front(),
1726 Region->FreeListInfo.BlockList.append_back(&GroupsToRelease);
1730 DCHECK(!BG->Batches.empty());
1732 if (BG->CompactPtrGroupBase <
1733 GroupsToRelease.front()->CompactPtrGroupBase) {
1735 BG = BG->Next;
1743 if (BG->CompactPtrGroupBase == Cur->CompactPtrGroupBase) {
1746 BG->BytesInBGAtLastCheckpoint = Cur->BytesInBGAtLastCheckpoint;
1747 const uptr MaxCachedPerBatch = BG->MaxCachedPerBatch;
1750 // full and only the first TransferBatch can have non-full blocks. Thus
1752 if (Cur->Batches.front()->getCount() == MaxCachedPerBatch) {
1753 BG->Batches.append_back(&Cur->Batches);
1755 TransferBatchT *NonFullBatch = Cur->Batches.front();
1756 Cur->Batches.pop_front();
1757 const u16 NonFullBatchCount = NonFullBatch->getCount();
1759 BG->Batches.append_back(&Cur->Batches);
1761 if (BG->Batches.front()->getCount() == MaxCachedPerBatch) {
1762 // Only 1 non-full TransferBatch, push it to the front.
1763 BG->Batches.push_front(NonFullBatch);
1766 Min(static_cast<u16>(MaxCachedPerBatch -
1767 BG->Batches.front()->getCount()),
1769 BG->Batches.front()->appendFromTransferBatch(NonFullBatch,
1771 if (NonFullBatch->isEmpty())
1774 BG->Batches.push_front(NonFullBatch);
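The merge step above relies on the invariant that, within a group, only the front TransferBatch may be partially filled; every other batch is full. Merging a released group back therefore only has to reconcile one non-full batch with the destination's front. A sketch of that reconciliation, with std::list standing in for the intrusive batch list (MaxCachedPerBatch and the block values are made up):

    #include <cstddef>
    #include <cstdio>
    #include <list>
    #include <utility>
    #include <vector>

    constexpr std::size_t MaxCachedPerBatch = 4; // made-up batch capacity
    using Batch = std::vector<int>;              // one TransferBatch's blocks

    // Merge the batches of a released group (Src) back into the group still on
    // the free list (Dst), preserving "only the front batch may be non-full".
    static void mergeGroups(std::list<Batch> &Dst, std::list<Batch> &Src) {
      // If Src's front batch is full, every batch in Src is full: append them all.
      if (Src.front().size() == MaxCachedPerBatch) {
        Dst.splice(Dst.end(), Src);
        return;
      }
      // Otherwise set the single non-full batch aside and append the full rest.
      Batch NonFull = std::move(Src.front());
      Src.pop_front();
      Dst.splice(Dst.end(), Src);
      if (Dst.empty()) { // Dst had nothing: the non-full batch becomes the front
        Dst.push_front(std::move(NonFull));
        return;
      }
      // Top up Dst's front from the non-full batch, then keep any remainder in
      // front so at most one batch stays non-full.
      Batch &Front = Dst.front();
      while (!NonFull.empty() && Front.size() < MaxCachedPerBatch) {
        Front.push_back(NonFull.back());
        NonFull.pop_back();
      }
      if (!NonFull.empty())
        Dst.push_front(std::move(NonFull));
    }

    int main() {
      std::list<Batch> Dst{{1, 2}, {3, 4, 5, 6}}; // front batch is non-full
      std::list<Batch> Src{{7}, {8, 9, 10, 11}};  // front batch is non-full
      mergeGroups(Dst, Src);
      std::printf("batches after merge: %zu\n", Dst.size()); // prints 3
      return 0;
    }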
1780 ScopedLock L(BatchClassRegion->FLLock);
1783 BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
1794 BG = BG->Next;
1809 Region->FreeListInfo.BlockList.push_front(Cur);
1811 Region->FreeListInfo.BlockList.insert(Prev, Cur);
1812 DCHECK_EQ(Cur->Next, BG);
1817 ScopedLock L(BatchClassRegion->FLLock);
1820 BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
1824 BatchGroupT *Prev = Region->FreeListInfo.BlockList.front();
1825 for (BatchGroupT *Cur = Prev->Next; Cur != nullptr;
1826 Prev = Cur, Cur = Cur->Next) {
1827 CHECK_LT(Prev->CompactPtrGroupBase, Cur->CompactPtrGroupBase);
1832 Region->FLLockCV.notifyAll(Region->FLLock);