//===-- primary64.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PRIMARY64_H_
#define SCUDO_PRIMARY64_H_

#include "allocator_common.h"
#include "bytemap.h"
#include "common.h"
#include "condition_variable.h"
#include "list.h"
#include "local_cache.h"
#include "mem_map.h"
#include "memtag.h"
#include "options.h"
#include "release.h"
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {

// SizeClassAllocator64 is an allocator tuned for 64-bit address space.
//
// It starts by reserving NumClasses * 2^RegionSizeLog bytes, equally divided
// in Regions, specific to each size class. Note that the base of that mapping
// is random (based on the platform-specific map() capabilities). If
// PrimaryEnableRandomOffset is set, each Region actually starts at a random
// offset from its base.
//
// Regions are mapped incrementally on demand to fulfill allocation requests,
// those mappings being split into equally sized Blocks based on the size class
// they belong to. The Blocks created are shuffled to prevent predictable
// address patterns (the predictability increases with the size of the Blocks).
//
// The 1st Region (for size class 0) holds the TransferBatches. This is a
// structure used to transfer arrays of available pointers from the size class
// freelist to the thread-specific freelist, and back.
//
// The memory used by this allocator is never unmapped, but can be partially
// released if the platform allows for it.
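//
// As a worked example (the numbers are illustrative, not a fixed
// configuration): with 32 size classes and a RegionSizeLog of 30, the Primary
// reserves 32 * 2^30 bytes (32 GiB) of address space when contiguous regions
// are enabled, and the Region for class I is dispatched at
// Base + (I << RegionSizeLog). With the random offset enabled, allocation
// within a Region starts 1 to 16 pages past its base.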
template <typename Config> class SizeClassAllocator64 {
public:
  typedef typename Config::CompactPtrT CompactPtrT;
  typedef typename Config::SizeClassMap SizeClassMap;
  typedef typename Config::ConditionVariableT ConditionVariableT;
  static const uptr CompactPtrScale = Config::getCompactPtrScale();
  static const uptr RegionSizeLog = Config::getRegionSizeLog();
  static const uptr GroupSizeLog = Config::getGroupSizeLog();
  static_assert(RegionSizeLog >= GroupSizeLog,
                "Group size shouldn't be greater than the region size");
  static const uptr GroupScale = GroupSizeLog - CompactPtrScale;
  typedef SizeClassAllocator64<Config> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
  typedef TransferBatch<ThisT> TransferBatchT;
  typedef BatchGroup<ThisT> BatchGroupT;

  static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
                "BatchGroupT uses the same class size as TransferBatchT");

  static uptr getSizeByClassId(uptr ClassId) {
    return (ClassId == SizeClassMap::BatchClassId)
               ? roundUp(sizeof(TransferBatchT), 1U << CompactPtrScale)
               : SizeClassMap::getSizeByClassId(ClassId);
  }

  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }

  static bool conditionVariableEnabled() {
    return Config::hasConditionVariableT();
  }

  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));

    const uptr PageSize = getPageSizeCached();
    const uptr GroupSize = (1UL << GroupSizeLog);
    const uptr PagesInGroup = GroupSize / PageSize;
    const uptr MinSizeClass = getSizeByClassId(1);
    // When trying to release pages back to memory, visiting smaller size
    // classes is expensive. Therefore, we only try to release smaller size
    // classes when the amount of free blocks goes over a certain threshold
    // (see the comment in releaseToOSMaybe() for more details). For example,
    // for size class 32, we only do the release when the size of free blocks
    // is greater than 97% of pages in a group. However, this introduces
    // another issue: the number of free blocks may bounce between 97% and
    // 100%, in which case we may attempt many page releases but only release
    // very few pages each time (less than 3% of a group).
    // Even though we have `ReleaseToOsIntervalMs`, which slightly reduces the
    // frequency of those calls, it is better to have another guard to
    // mitigate this issue.
    //
    // Here we add another constraint on the minimum size requirement. The
    // constraint is determined by the size of in-use blocks in the minimal
    // size class. Take size class 32 as an example,
    //
    //   +-          one memory group          -+
    //   +----------------------------+--------+
    //   |     97% of free blocks     |        |
    //   +----------------------------+--------+
    //                                 \      /
    //                             3% in-use blocks
    //
    //   * The release size threshold is 97%.
    //
    // The 3% of a group is about 7 pages (e.g., with 4 KiB pages and a 1 MiB
    // group, 256 * 3 / 100 = 7). For two consecutive releaseToOSMaybe() calls,
    // we require the difference in `PushedBlocks` to be greater than 7 pages'
    // worth of blocks. This mitigates the page-release thrashing caused by
    // memory usage bouncing around the threshold. The smallest size class
    // takes the longest time to do the page release, so we use the size of
    // its in-use blocks as a heuristic.
    SmallerBlockReleasePageDelta =
        PagesInGroup * (1 + MinSizeClass / 16U) / 100;

    u32 Seed;
    const u64 Time = getMonotonicTimeFast();
    if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
      Seed = static_cast<u32>(Time ^ (reinterpret_cast<uptr>(&Seed) >> 12));

    for (uptr I = 0; I < NumClasses; I++)
      getRegionInfo(I)->RandState = getRandomU32(&Seed);

    if (Config::getEnableContiguousRegions()) {
      ReservedMemoryT ReservedMemory = {};
      // Reserve the space required for the Primary.
      CHECK(ReservedMemory.create(/*Addr=*/0U, RegionSize * NumClasses,
                                  "scudo:primary_reserve"));
      const uptr PrimaryBase = ReservedMemory.getBase();

      for (uptr I = 0; I < NumClasses; I++) {
        MemMapT RegionMemMap = ReservedMemory.dispatch(
            PrimaryBase + (I << RegionSizeLog), RegionSize);
        RegionInfo *Region = getRegionInfo(I);

        initRegion(Region, I, RegionMemMap, Config::getEnableRandomOffset());
      }
      shuffle(RegionInfoArray, NumClasses, &Seed);
    }

    // The binding should be done after region shuffling so that it won't bind
    // the FLLock from the wrong region.
    for (uptr I = 0; I < NumClasses; I++)
      getRegionInfo(I)->FLLockCV.bindTestOnly(getRegionInfo(I)->FLLock);

    // The default value in the primary config has higher priority.
    if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
      ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

  void unmapTestOnly() {
    for (uptr I = 0; I < NumClasses; I++) {
      RegionInfo *Region = getRegionInfo(I);
      {
        ScopedLock ML(Region->MMLock);
        MemMapT MemMap = Region->MemMapInfo.MemMap;
        if (MemMap.isAllocated())
          MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
      }
      *Region = {};
    }
  }

  // When all blocks are freed, the total block size has to match
  // `AllocatedUser`.
  void verifyAllBlocksAreReleasedTestOnly() {
    // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass.
    uptr BatchClassUsedInFreeLists = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      // We have to count BatchClassUsedInFreeLists in other regions first.
      if (I == SizeClassMap::BatchClassId)
        continue;
      RegionInfo *Region = getRegionInfo(I);
      ScopedLock ML(Region->MMLock);
      ScopedLock FL(Region->FLLock);
      const uptr BlockSize = getSizeByClassId(I);
      uptr TotalBlocks = 0;
      for (BatchGroupT &BG : Region->FreeListInfo.BlockList) {
        // `BG::Batches` are `TransferBatches`. +1 for the `BatchGroup`.
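        // Each `TransferBatch`, and the `BatchGroup` header itself, occupies
        // one BatchClass block.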
        BatchClassUsedInFreeLists += BG.Batches.size() + 1;
        for (const auto &It : BG.Batches)
          TotalBlocks += It.getCount();
      }

      DCHECK_EQ(TotalBlocks, Region->MemMapInfo.AllocatedUser / BlockSize);
      DCHECK_EQ(Region->FreeListInfo.PushedBlocks,
                Region->FreeListInfo.PoppedBlocks);
    }

    RegionInfo *Region = getRegionInfo(SizeClassMap::BatchClassId);
    ScopedLock ML(Region->MMLock);
    ScopedLock FL(Region->FLLock);
    const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
    uptr TotalBlocks = 0;
    for (BatchGroupT &BG : Region->FreeListInfo.BlockList) {
      if (LIKELY(!BG.Batches.empty())) {
        for (const auto &It : BG.Batches)
          TotalBlocks += It.getCount();
      } else {
        // A `BatchGroup` with an empty freelist has no `TransferBatch`
        // recording its own block, so count the `BatchGroup` block itself.
        ++TotalBlocks;
      }
    }
    DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
              Region->MemMapInfo.AllocatedUser / BlockSize);
    DCHECK_GE(Region->FreeListInfo.PoppedBlocks,
              Region->FreeListInfo.PushedBlocks);
    const uptr BlocksInUse =
        Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
    DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
  }

  u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
                const u16 MaxBlockCount) {
    DCHECK_LT(ClassId, NumClasses);
    RegionInfo *Region = getRegionInfo(ClassId);
    u16 PopCount = 0;

    {
      ScopedLock L(Region->FLLock);
      PopCount = popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
      if (PopCount != 0U)
        return PopCount;
    }

    bool ReportRegionExhausted = false;

    if (conditionVariableEnabled()) {
      PopCount = popBlocksWithCV(C, ClassId, Region, ToArray, MaxBlockCount,
                                 ReportRegionExhausted);
    } else {
      while (true) {
        // When two threads compete for `Region->MMLock`, we only want one of
        // them to call populateFreeListAndPopBlocks(). To avoid both of them
        // doing that, always check the freelist before mapping new pages.
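        // The loser of the race simply finds the freelist refilled by the
        // winner on the re-check below.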
        ScopedLock ML(Region->MMLock);
        {
          ScopedLock FL(Region->FLLock);
          PopCount = popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
          if (PopCount != 0U)
            return PopCount;
        }

        const bool RegionIsExhausted = Region->Exhausted;
        if (!RegionIsExhausted) {
          PopCount = populateFreeListAndPopBlocks(C, ClassId, Region, ToArray,
                                                  MaxBlockCount);
        }
        ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
        break;
      }
    }

    if (UNLIKELY(ReportRegionExhausted)) {
      Printf("Can't populate more pages for size class %zu.\n",
             getSizeByClassId(ClassId));

      // Theoretically, BatchClass shouldn't be used up. Abort immediately when
      // it happens.
      if (ClassId == SizeClassMap::BatchClassId)
        reportOutOfBatchClass();
    }

    return PopCount;
  }

  // Push the array of free blocks to the designated batch group.
  void pushBlocks(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size) {
    DCHECK_LT(ClassId, NumClasses);
    DCHECK_GT(Size, 0);

    RegionInfo *Region = getRegionInfo(ClassId);
    if (ClassId == SizeClassMap::BatchClassId) {
      ScopedLock L(Region->FLLock);
      pushBatchClassBlocks(Region, Array, Size);
      if (conditionVariableEnabled())
        Region->FLLockCV.notifyAll(Region->FLLock);
      return;
    }

    // TODO(chiahungduan): Consider not doing grouping if the group size is not
    // greater than the block size with a certain scale.

    bool SameGroup = true;
    if (GroupSizeLog < RegionSizeLog) {
      // Sort the blocks so that blocks belonging to the same group can be
      // pushed together.
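      // The insertion sort below also detects whether all the blocks already
      // fall in a single group (`SameGroup`).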
      for (u32 I = 1; I < Size; ++I) {
        if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I]))
          SameGroup = false;
        CompactPtrT Cur = Array[I];
        u32 J = I;
        while (J > 0 && compactPtrGroup(Cur) < compactPtrGroup(Array[J - 1])) {
          Array[J] = Array[J - 1];
          --J;
        }
        Array[J] = Cur;
      }
    }

    {
      ScopedLock L(Region->FLLock);
      pushBlocksImpl(C, ClassId, Region, Array, Size, SameGroup);
      if (conditionVariableEnabled())
        Region->FLLockCV.notifyAll(Region->FLLock);
    }
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    // The BatchClassId must be locked last since other classes can use it.
    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
      if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
        continue;
      getRegionInfo(static_cast<uptr>(I))->MMLock.lock();
      getRegionInfo(static_cast<uptr>(I))->FLLock.lock();
    }
    getRegionInfo(SizeClassMap::BatchClassId)->MMLock.lock();
    getRegionInfo(SizeClassMap::BatchClassId)->FLLock.lock();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    getRegionInfo(SizeClassMap::BatchClassId)->FLLock.unlock();
    getRegionInfo(SizeClassMap::BatchClassId)->MMLock.unlock();
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      getRegionInfo(I)->FLLock.unlock();
      getRegionInfo(I)->MMLock.unlock();
    }
  }

  template <typename F> void iterateOverBlocks(F Callback) {
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      RegionInfo *Region = getRegionInfo(I);
      // TODO: The call of `iterateOverBlocks` requires disabling
      // SizeClassAllocator64. We may consider locking each region on demand
      // only.
      Region->FLLock.assertHeld();
      Region->MMLock.assertHeld();
      const uptr BlockSize = getSizeByClassId(I);
      const uptr From = Region->RegionBeg;
      const uptr To = From + Region->MemMapInfo.AllocatedUser;
      for (uptr Block = From; Block < To; Block += BlockSize)
        Callback(Block);
    }
  }

  void getStats(ScopedString *Str) {
    // TODO(kostyak): get the RSS per region.
    uptr TotalMapped = 0;
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      RegionInfo *Region = getRegionInfo(I);
      {
        ScopedLock L(Region->MMLock);
        TotalMapped += Region->MemMapInfo.MappedUser;
      }
      {
        ScopedLock L(Region->FLLock);
        PoppedBlocks += Region->FreeListInfo.PoppedBlocks;
        PushedBlocks += Region->FreeListInfo.PushedBlocks;
      }
    }
    const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    Str->append("Stats: SizeClassAllocator64: %zuM mapped (%uM rss) in %zu "
                "allocations; remains %zu; ReleaseToOsIntervalMs = %d\n",
                TotalMapped >> 20, 0U, PoppedBlocks,
                PoppedBlocks - PushedBlocks, IntervalMs >= 0 ? IntervalMs : -1);

    for (uptr I = 0; I < NumClasses; I++) {
      RegionInfo *Region = getRegionInfo(I);
      ScopedLock L1(Region->MMLock);
      ScopedLock L2(Region->FLLock);
      getStats(Str, I, Region);
    }
  }

  void getFragmentationInfo(ScopedString *Str) {
    Str->append(
        "Fragmentation Stats: SizeClassAllocator64: page size = %zu bytes\n",
        getPageSizeCached());

    for (uptr I = 1; I < NumClasses; I++) {
      RegionInfo *Region = getRegionInfo(I);
      ScopedLock L(Region->MMLock);
      getRegionFragmentationInfo(Region, I, Str);
    }
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()),
          Config::getMinReleaseToOsIntervalMs());
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    // Not supported by the Primary, but not an error either.
    return true;
  }

  uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
    RegionInfo *Region = getRegionInfo(ClassId);
    // Note that tryLock() may fail spuriously. Given that this should be rare
    // and that page releasing is fine to skip, we don't take extra measures
    // to ensure that a page release actually happens here.
    if (Region->MMLock.tryLock()) {
      uptr BytesReleased = releaseToOSMaybe(Region, ClassId, ReleaseType);
      Region->MMLock.unlock();
      return BytesReleased;
    }
    return 0;
  }

  uptr releaseToOS(ReleaseToOS ReleaseType) {
    uptr TotalReleasedBytes = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      RegionInfo *Region = getRegionInfo(I);
      ScopedLock L(Region->MMLock);
      TotalReleasedBytes += releaseToOSMaybe(Region, I, ReleaseType);
    }
    return TotalReleasedBytes;
  }

  const char *getRegionInfoArrayAddress() const {
    return reinterpret_cast<const char *>(RegionInfoArray);
  }

  static uptr getRegionInfoArraySize() { return sizeof(RegionInfoArray); }

  uptr getCompactPtrBaseByClassId(uptr ClassId) {
    return getRegionInfo(ClassId)->RegionBeg;
  }

  CompactPtrT compactPtr(uptr ClassId, uptr Ptr) {
    DCHECK_LE(ClassId, SizeClassMap::LargestClassId);
    return compactPtrInternal(getCompactPtrBaseByClassId(ClassId), Ptr);
  }

  void *decompactPtr(uptr ClassId, CompactPtrT CompactPtr) {
    DCHECK_LE(ClassId, SizeClassMap::LargestClassId);
    return reinterpret_cast<void *>(
        decompactPtrInternal(getCompactPtrBaseByClassId(ClassId), CompactPtr));
  }

  static BlockInfo findNearestBlock(const char *RegionInfoData,
                                    uptr Ptr) NO_THREAD_SAFETY_ANALYSIS {
    const RegionInfo *RegionInfoArray =
        reinterpret_cast<const RegionInfo *>(RegionInfoData);

    uptr ClassId;
    uptr MinDistance = -1UL;
    for (uptr I = 0; I != NumClasses; ++I) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      uptr Begin = RegionInfoArray[I].RegionBeg;
      // TODO(chiahungduan): In fact, we need to lock RegionInfo::MMLock.
      // However, `RegionInfoData` is passed with a const qualifier, and
      // locking the mutex requires modifying it, which means we would need to
      // cast away the const. That may lead to another undefined behavior (the
      // first one is accessing `AllocatedUser` without locking). It would be
      // better to pass `RegionInfoData` as `void *` so we can lock the mutex
      // properly.
      uptr End = Begin + RegionInfoArray[I].MemMapInfo.AllocatedUser;
      if (Begin > End || End - Begin < SizeClassMap::getSizeByClassId(I))
        continue;
      uptr RegionDistance;
      if (Begin <= Ptr) {
        if (Ptr < End)
          RegionDistance = 0;
        else
          RegionDistance = Ptr - End;
      } else {
        RegionDistance = Begin - Ptr;
      }

      if (RegionDistance < MinDistance) {
        MinDistance = RegionDistance;
        ClassId = I;
      }
    }

    BlockInfo B = {};
    if (MinDistance <= 8192) {
      B.RegionBegin = RegionInfoArray[ClassId].RegionBeg;
      B.RegionEnd =
          B.RegionBegin + RegionInfoArray[ClassId].MemMapInfo.AllocatedUser;
      B.BlockSize = SizeClassMap::getSizeByClassId(ClassId);
      B.BlockBegin =
          B.RegionBegin + uptr(sptr(Ptr - B.RegionBegin) / sptr(B.BlockSize) *
                               sptr(B.BlockSize));
      while (B.BlockBegin < B.RegionBegin)
        B.BlockBegin += B.BlockSize;
      while (B.RegionEnd < B.BlockBegin + B.BlockSize)
        B.BlockBegin -= B.BlockSize;
    }
    return B;
  }

  AtomicOptions Options;

private:
  static const uptr RegionSize = 1UL << RegionSizeLog;
  static const uptr NumClasses = SizeClassMap::NumClasses;

  static const uptr MapSizeIncrement = Config::getMapSizeIncrement();
  // Fill at most this number of batches from the newly mapped memory.
  static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;

  struct ReleaseToOsInfo {
    uptr BytesInFreeListAtLastCheckpoint;
    uptr RangesReleased;
    uptr LastReleasedBytes;
    u64 LastReleaseAtNs;
  };

  struct BlocksInfo {
    SinglyLinkedList<BatchGroupT> BlockList = {};
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
  };

  struct PagesInfo {
    MemMapT MemMap = {};
    // Bytes mapped for user memory.
    uptr MappedUser = 0;
    // Bytes allocated for user memory.
    uptr AllocatedUser = 0;
  };

  struct UnpaddedRegionInfo {
    // Mutex for operations on freelist
    HybridMutex FLLock;
    ConditionVariableT FLLockCV GUARDED_BY(FLLock);
    // Mutex for memmap operations
    HybridMutex MMLock ACQUIRED_BEFORE(FLLock);
    // `RegionBeg` is initialized before thread creation and won't be changed.
    uptr RegionBeg = 0;
    u32 RandState GUARDED_BY(MMLock) = 0;
    BlocksInfo FreeListInfo GUARDED_BY(FLLock);
    PagesInfo MemMapInfo GUARDED_BY(MMLock);
    // The minimum size of pushed blocks to trigger page release.
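    // (Set in initRegion(): one page for regular classes, scaled by
    // SmallerBlockReleasePageDelta for the small-block classes.)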
    uptr TryReleaseThreshold GUARDED_BY(MMLock) = 0;
    ReleaseToOsInfo ReleaseInfo GUARDED_BY(MMLock) = {};
    bool Exhausted GUARDED_BY(MMLock) = false;
    bool isPopulatingFreeList GUARDED_BY(FLLock) = false;
  };
  struct RegionInfo : UnpaddedRegionInfo {
    char Padding[SCUDO_CACHE_LINE_SIZE -
                 (sizeof(UnpaddedRegionInfo) % SCUDO_CACHE_LINE_SIZE)] = {};
  };
  static_assert(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");

  RegionInfo *getRegionInfo(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    return &RegionInfoArray[ClassId];
  }

  uptr getRegionBaseByClassId(uptr ClassId) {
    RegionInfo *Region = getRegionInfo(ClassId);
    Region->MMLock.assertHeld();

    if (!Config::getEnableContiguousRegions() &&
        !Region->MemMapInfo.MemMap.isAllocated()) {
      return 0U;
    }
    return Region->MemMapInfo.MemMap.getBase();
  }

  static CompactPtrT compactPtrInternal(uptr Base, uptr Ptr) {
    return static_cast<CompactPtrT>((Ptr - Base) >> CompactPtrScale);
  }

  static uptr decompactPtrInternal(uptr Base, CompactPtrT CompactPtr) {
    return Base + (static_cast<uptr>(CompactPtr) << CompactPtrScale);
  }

  static uptr compactPtrGroup(CompactPtrT CompactPtr) {
    const uptr Mask = (static_cast<uptr>(1) << GroupScale) - 1;
    return static_cast<uptr>(CompactPtr) & ~Mask;
  }
  static uptr decompactGroupBase(uptr Base, uptr CompactPtrGroupBase) {
    DCHECK_EQ(CompactPtrGroupBase % (static_cast<uptr>(1) << (GroupScale)), 0U);
    return Base + (CompactPtrGroupBase << CompactPtrScale);
  }

  ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) {
    const uptr PageSize = getPageSizeCached();
    return BlockSize < PageSize / 16U;
  }

  ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) {
    const uptr PageSize = getPageSizeCached();
    return BlockSize > PageSize;
  }

  ALWAYS_INLINE void initRegion(RegionInfo *Region, uptr ClassId,
                                MemMapT MemMap, bool EnableRandomOffset)
      REQUIRES(Region->MMLock) {
    DCHECK(!Region->MemMapInfo.MemMap.isAllocated());
    DCHECK(MemMap.isAllocated());
    const uptr PageSize = getPageSizeCached();

    Region->MemMapInfo.MemMap = MemMap;

    Region->RegionBeg = MemMap.getBase();
    if (EnableRandomOffset) {
      Region->RegionBeg +=
          (getRandomModN(&Region->RandState, 16) + 1) * PageSize;
    }

    // Releasing small blocks is expensive, set a higher threshold to avoid
    // frequent page releases.
    if (isSmallBlock(getSizeByClassId(ClassId)))
      Region->TryReleaseThreshold = PageSize * SmallerBlockReleasePageDelta;
    else
      Region->TryReleaseThreshold = PageSize;
  }

  void pushBatchClassBlocks(RegionInfo *Region, CompactPtrT *Array, u32 Size)
      REQUIRES(Region->FLLock) {
    DCHECK_EQ(Region, getRegionInfo(SizeClassMap::BatchClassId));

    // Free blocks are recorded by TransferBatch in the freelist for all
    // size classes. In addition, TransferBatch is allocated from BatchClassId.
    // In order not to use an additional block to record the free blocks in
    // BatchClassId, they are self-contained, i.e., a TransferBatch records the
    // block address of itself. See the figure below:
    //
    //   TransferBatch at 0xABCD
    //   +----------------------------+
    //   | Free blocks' addr          |
    //   | +------+------+------+     |
    //   | |0xABCD|...   |...   |     |
    //   | +------+------+------+     |
    //   +----------------------------+
    //
    // When we allocate all the free blocks in the TransferBatch, the block
    // used by the TransferBatch is also free for use. We don't need to
    // recycle the TransferBatch. Note that the correctness is maintained by
    // the invariant:
    //
    //   Each popBlocks() request returns the entire TransferBatch. Returning
    //   part of the blocks in a TransferBatch is invalid.
    //
    // This ensures that the TransferBatch won't leak its own address while
    // it's still holding other valid data.
    //
    // Besides, the BatchGroup is also allocated from BatchClassId and has its
    // address recorded in the TransferBatch too. To maintain the correctness:
    //
    //   The address of the BatchGroup is always recorded in the last
    //   TransferBatch in the freelist (which also implies that the freelist
    //   should only be updated with push_front). Once the last TransferBatch
    //   is popped, the block used by the BatchGroup is also free for use.
    //
    // With this approach, the blocks used by the BatchGroup and the
    // TransferBatches are reusable and don't need additional space for them.

    Region->FreeListInfo.PushedBlocks += Size;
    BatchGroupT *BG = Region->FreeListInfo.BlockList.front();

    if (BG == nullptr) {
      // Construct the `BatchGroup` on the last element.
      BG = reinterpret_cast<BatchGroupT *>(
          decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
      --Size;
      BG->Batches.clear();
      // The memory group is not enabled for BatchClass. Use `0` to indicate
      // there's no memory group here.
      BG->CompactPtrGroupBase = 0;
      // `BG` is also the block of BatchClassId. Note that this is different
      // from `CreateGroup` in `pushBlocksImpl`.
      BG->PushedBlocks = 1;
      BG->BytesInBGAtLastCheckpoint = 0;
      BG->MaxCachedPerBatch =
          CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));

      Region->FreeListInfo.BlockList.push_front(BG);
    }

    if (UNLIKELY(Size == 0))
      return;

    // This happens in two cases:
    // 1. A new `BatchGroup` was just allocated above.
    // 2. Only one block was pushed while the freelist was empty.
    if (BG->Batches.empty()) {
      // Construct the `TransferBatch` on the last element.
      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
          decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
      TB->clear();
      // As mentioned above, the addresses of the `TransferBatch` and the
      // `BatchGroup` are recorded in the TransferBatch.
      TB->add(Array[Size - 1]);
      TB->add(
          compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
      --Size;
      DCHECK_EQ(BG->PushedBlocks, 1U);
      // `TB` is also the block of BatchClassId.
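      // Hence the increment below accounts for the block occupied by `TB`
      // itself.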
      BG->PushedBlocks += 1;
      BG->Batches.push_front(TB);
    }

    TransferBatchT *CurBatch = BG->Batches.front();
    DCHECK_NE(CurBatch, nullptr);

    for (u32 I = 0; I < Size;) {
      u16 UnusedSlots =
          static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
      if (UnusedSlots == 0) {
        CurBatch = reinterpret_cast<TransferBatchT *>(
            decompactPtr(SizeClassMap::BatchClassId, Array[I]));
        CurBatch->clear();
        // Self-contained
        CurBatch->add(Array[I]);
        ++I;
        // TODO(chiahungduan): Avoid the use of push_back() in `Batches` of
        // BatchClassId.
        BG->Batches.push_front(CurBatch);
        UnusedSlots = static_cast<u16>(BG->MaxCachedPerBatch - 1);
      }
      // `UnusedSlots` is u16 so the result will also fit in u16.
      const u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
      CurBatch->appendFromArray(&Array[I], AppendSize);
      I += AppendSize;
    }

    BG->PushedBlocks += Size;
  }

  // Push the blocks to their batch group. The layout will be like,
  //
  //   FreeListInfo.BlockList -> BG -> BG -> BG
  //                             |     |     |
  //                             v     v     v
  //                             TB    TB    TB
  //                             |
  //                             v
  //                             TB
  //
  // Each BatchGroup (BG) is associated with a unique group id and the free
  // blocks are managed by a list of TransferBatches (TB). To reduce the time
  // spent inserting blocks, the BGs are sorted and the input `Array` is
  // supposed to be sorted too, so that the sorted property can be maintained
  // efficiently.
  // Use `SameGroup=true` to indicate that all blocks in the array are from
  // the same group; then we will skip checking the group id of each block.
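  // (populateFreeListAndPopBlocks() relies on this: freshly carved blocks are
  // pushed one group at a time with `SameGroup=true`.)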
  void pushBlocksImpl(CacheT *C, uptr ClassId, RegionInfo *Region,
                      CompactPtrT *Array, u32 Size, bool SameGroup = false)
      REQUIRES(Region->FLLock) {
    DCHECK_NE(ClassId, SizeClassMap::BatchClassId);
    DCHECK_GT(Size, 0U);

    auto CreateGroup = [&](uptr CompactPtrGroupBase) {
      BatchGroupT *BG =
          reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
      BG->Batches.clear();
      TransferBatchT *TB =
          reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
      TB->clear();

      BG->CompactPtrGroupBase = CompactPtrGroupBase;
      BG->Batches.push_front(TB);
      BG->PushedBlocks = 0;
      BG->BytesInBGAtLastCheckpoint = 0;
      BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached;

      return BG;
    };

    auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
      SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
      TransferBatchT *CurBatch = Batches.front();
      DCHECK_NE(CurBatch, nullptr);

      for (u32 I = 0; I < Size;) {
        DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
        u16 UnusedSlots =
            static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
        if (UnusedSlots == 0) {
          CurBatch =
              reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
          CurBatch->clear();
          Batches.push_front(CurBatch);
          UnusedSlots = BG->MaxCachedPerBatch;
        }
        // `UnusedSlots` is u16 so the result will also fit in u16.
        u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
        CurBatch->appendFromArray(&Array[I], AppendSize);
        I += AppendSize;
      }

      BG->PushedBlocks += Size;
    };

    Region->FreeListInfo.PushedBlocks += Size;
    BatchGroupT *Cur = Region->FreeListInfo.BlockList.front();

    // In the following, `Cur` always points to the BatchGroup for blocks that
    // will be pushed next. `Prev` is the element right before `Cur`.
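    // E.g., if `BlockList` holds groups {G0, G2, G3} and the incoming blocks
    // belong to G2, the walk below leaves `Prev` at G0's group and `Cur` at
    // G2's.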
    BatchGroupT *Prev = nullptr;

    while (Cur != nullptr &&
           compactPtrGroup(Array[0]) > Cur->CompactPtrGroupBase) {
      Prev = Cur;
      Cur = Cur->Next;
    }

    if (Cur == nullptr ||
        compactPtrGroup(Array[0]) != Cur->CompactPtrGroupBase) {
      Cur = CreateGroup(compactPtrGroup(Array[0]));
      if (Prev == nullptr)
        Region->FreeListInfo.BlockList.push_front(Cur);
      else
        Region->FreeListInfo.BlockList.insert(Prev, Cur);
    }

    // All the blocks are from the same group, just push without checking group
    // id.
    if (SameGroup) {
      for (u32 I = 0; I < Size; ++I)
        DCHECK_EQ(compactPtrGroup(Array[I]), Cur->CompactPtrGroupBase);

      InsertBlocks(Cur, Array, Size);
      return;
    }

    // The blocks are sorted by group id. Determine the segment of each group
    // and push the blocks to their group together.
    u32 Count = 1;
    for (u32 I = 1; I < Size; ++I) {
      if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I])) {
        DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->CompactPtrGroupBase);
        InsertBlocks(Cur, Array + I - Count, Count);

        while (Cur != nullptr &&
               compactPtrGroup(Array[I]) > Cur->CompactPtrGroupBase) {
          Prev = Cur;
          Cur = Cur->Next;
        }

        if (Cur == nullptr ||
            compactPtrGroup(Array[I]) != Cur->CompactPtrGroupBase) {
          Cur = CreateGroup(compactPtrGroup(Array[I]));
          DCHECK_NE(Prev, nullptr);
          Region->FreeListInfo.BlockList.insert(Prev, Cur);
        }

        Count = 1;
      } else {
        ++Count;
      }
    }

    InsertBlocks(Cur, Array + Size - Count, Count);
  }

  u16 popBlocksWithCV(CacheT *C, uptr ClassId, RegionInfo *Region,
                      CompactPtrT *ToArray, const u16 MaxBlockCount,
                      bool &ReportRegionExhausted) {
    u16 PopCount = 0;

    while (true) {
      // We only expect one thread doing the freelist refill; the other
      // threads will be waiting for either the completion of
      // `populateFreeListAndPopBlocks()` or `pushBlocks()` called by other
      // threads.
      bool PopulateFreeList = false;
      {
        ScopedLock FL(Region->FLLock);
        if (!Region->isPopulatingFreeList) {
          Region->isPopulatingFreeList = true;
          PopulateFreeList = true;
        }
      }

      if (PopulateFreeList) {
        ScopedLock ML(Region->MMLock);

        const bool RegionIsExhausted = Region->Exhausted;
        if (!RegionIsExhausted) {
          PopCount = populateFreeListAndPopBlocks(C, ClassId, Region, ToArray,
                                                  MaxBlockCount);
        }
        ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;

        {
          // Before reacquiring the `FLLock`, the freelist may have been used
          // up again and some threads may be waiting for a freelist refill by
          // the current thread. It's important to set
          // `Region->isPopulatingFreeList` to false so the threads about to
          // sleep will notice the status change.
          ScopedLock FL(Region->FLLock);
          Region->isPopulatingFreeList = false;
          Region->FLLockCV.notifyAll(Region->FLLock);
        }

        break;
      }

      // At this point, there are two preconditions to be met before waiting:
      // 1. The freelist is empty.
      // 2. Region->isPopulatingFreeList == true, i.e., someone is still doing
      //    `populateFreeListAndPopBlocks()`.
      //
      // Note that it's possible that the freelist is empty but
      // Region->isPopulatingFreeList == false, because all the newly populated
      // blocks were used up right after the refill. Therefore, we have to
      // check if someone is still populating the freelist.
      ScopedLock FL(Region->FLLock);
      PopCount = popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
      if (PopCount != 0U)
        break;

      if (!Region->isPopulatingFreeList)
        continue;

      // Now the freelist is empty and someone is doing the refill. We will
      // wait until someone refills the freelist or finishes
      // `populateFreeListAndPopBlocks()`. The refill can be done by
      // `populateFreeListAndPopBlocks()`, `pushBlocks()`,
      // `pushBatchClassBlocks()` and `mergeGroupsToReleaseBack()`.
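      // Spurious wakeups are tolerated: the enclosing loop re-checks the
      // freelist after every wait.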
      Region->FLLockCV.wait(Region->FLLock);

      PopCount = popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
      if (PopCount != 0U)
        break;
    }

    return PopCount;
  }

  u16 popBlocksImpl(CacheT *C, uptr ClassId, RegionInfo *Region,
                    CompactPtrT *ToArray, const u16 MaxBlockCount)
      REQUIRES(Region->FLLock) {
    if (Region->FreeListInfo.BlockList.empty())
      return 0U;

    SinglyLinkedList<TransferBatchT> &Batches =
        Region->FreeListInfo.BlockList.front()->Batches;

    if (Batches.empty()) {
      DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
      BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
      Region->FreeListInfo.BlockList.pop_front();

      // The block used by the `BatchGroup` is from BatchClassId. Turn the
      // block into a `TransferBatch` with a single block.
      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
      ToArray[0] =
          compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB));
      Region->FreeListInfo.PoppedBlocks += 1;
      return 1U;
    }

    // So far, instead of always filling blocks to `MaxBlockCount`, we only
    // examine a single `TransferBatch` to minimize the time spent in the
    // primary allocator. Besides, the sizes of `TransferBatch` and
    // `CacheT::getMaxCached()` may also impact the time spent on accessing the
    // primary allocator.
    // TODO(chiahungduan): Evaluate if we want to always prepare `MaxBlockCount`
    // blocks and/or adjust the size of `TransferBatch` according to
    // `CacheT::getMaxCached()`.
    TransferBatchT *B = Batches.front();
    DCHECK_NE(B, nullptr);
    DCHECK_GT(B->getCount(), 0U);

    // BatchClassId should always take all blocks in the TransferBatch. Read
    // the comment in `pushBatchClassBlocks()` for more details.
    const u16 PopCount = ClassId == SizeClassMap::BatchClassId
                             ? B->getCount()
                             : Min(MaxBlockCount, B->getCount());
    B->moveNToArray(ToArray, PopCount);

    // TODO(chiahungduan): The deallocation of unused BatchClassId blocks can
    // be done without holding `FLLock`.
    if (B->empty()) {
      Batches.pop_front();
      // A `TransferBatch` of BatchClassId is self-contained, no need to
      // deallocate. Read the comment in `pushBatchClassBlocks()` for more
      // details.
      if (ClassId != SizeClassMap::BatchClassId)
        C->deallocate(SizeClassMap::BatchClassId, B);

      if (Batches.empty()) {
        BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
        Region->FreeListInfo.BlockList.pop_front();

        // We don't keep a BatchGroup with zero blocks, to avoid empty-checking
        // while allocating. Note that the block used for constructing the
        // BatchGroup is recorded as a free block in the last element of
        // BatchGroup::Batches, which means that once we pop the last
        // TransferBatch, the block is implicitly deallocated.
        if (ClassId != SizeClassMap::BatchClassId)
          C->deallocate(SizeClassMap::BatchClassId, BG);
      }
    }

    Region->FreeListInfo.PoppedBlocks += PopCount;

    return PopCount;
  }

  NOINLINE u16 populateFreeListAndPopBlocks(CacheT *C, uptr ClassId,
                                            RegionInfo *Region,
                                            CompactPtrT *ToArray,
                                            const u16 MaxBlockCount)
      REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
    if (!Config::getEnableContiguousRegions() &&
        !Region->MemMapInfo.MemMap.isAllocated()) {
      ReservedMemoryT ReservedMemory;
      if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, RegionSize,
                                          "scudo:primary_reserve",
                                          MAP_ALLOWNOMEM))) {
        Printf("Can't reserve pages for size class %zu.\n",
               getSizeByClassId(ClassId));
        return 0U;
      }
      initRegion(Region, ClassId,
                 ReservedMemory.dispatch(ReservedMemory.getBase(),
                                         ReservedMemory.getCapacity()),
                 /*EnableRandomOffset=*/false);
    }

    DCHECK(Region->MemMapInfo.MemMap.isAllocated());
    const uptr Size = getSizeByClassId(ClassId);
    const u16 MaxCount = CacheT::getMaxCached(Size);
    const uptr RegionBeg = Region->RegionBeg;
    const uptr MappedUser = Region->MemMapInfo.MappedUser;
    const uptr TotalUserBytes =
        Region->MemMapInfo.AllocatedUser + MaxCount * Size;
    // Map more space for blocks, if necessary.
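    // For example (illustrative numbers, assuming a MapSizeIncrement of
    // 128 KiB): if TotalUserBytes exceeds MappedUser by 40 KiB, the remap
    // below still grows the mapping by a full 128 KiB, amortizing the mapping
    // cost over several refills.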
    if (TotalUserBytes > MappedUser) {
      // Do the mmap for the user memory.
      const uptr MapSize =
          roundUp(TotalUserBytes - MappedUser, MapSizeIncrement);
      const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
      if (UNLIKELY(RegionBase + MappedUser + MapSize > RegionSize)) {
        Region->Exhausted = true;
        return 0U;
      }

      if (UNLIKELY(!Region->MemMapInfo.MemMap.remap(
              RegionBeg + MappedUser, MapSize, "scudo:primary",
              MAP_ALLOWNOMEM | MAP_RESIZABLE |
                  (useMemoryTagging<Config>(Options.load()) ? MAP_MEMTAG
                                                            : 0)))) {
        return 0U;
      }
      Region->MemMapInfo.MappedUser += MapSize;
      C->getStats().add(StatMapped, MapSize);
    }

    const u32 NumberOfBlocks =
        Min(MaxNumBatches * MaxCount,
            static_cast<u32>((Region->MemMapInfo.MappedUser -
                              Region->MemMapInfo.AllocatedUser) /
                             Size));
    DCHECK_GT(NumberOfBlocks, 0);

    constexpr u32 ShuffleArraySize =
        MaxNumBatches * TransferBatchT::MaxNumCached;
    CompactPtrT ShuffleArray[ShuffleArraySize];
    DCHECK_LE(NumberOfBlocks, ShuffleArraySize);

    const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId);
    uptr P = RegionBeg + Region->MemMapInfo.AllocatedUser;
    for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
      ShuffleArray[I] = compactPtrInternal(CompactPtrBase, P);

    ScopedLock L(Region->FLLock);

    if (ClassId != SizeClassMap::BatchClassId) {
      u32 N = 1;
      uptr CurGroup = compactPtrGroup(ShuffleArray[0]);
      for (u32 I = 1; I < NumberOfBlocks; I++) {
        if (UNLIKELY(compactPtrGroup(ShuffleArray[I]) != CurGroup)) {
          shuffle(ShuffleArray + I - N, N, &Region->RandState);
          pushBlocksImpl(C, ClassId, Region, ShuffleArray + I - N, N,
                         /*SameGroup=*/true);
          N = 1;
          CurGroup = compactPtrGroup(ShuffleArray[I]);
        } else {
          ++N;
        }
      }

      shuffle(ShuffleArray + NumberOfBlocks - N, N, &Region->RandState);
      pushBlocksImpl(C, ClassId, Region, &ShuffleArray[NumberOfBlocks - N], N,
                     /*SameGroup=*/true);
    } else {
      pushBatchClassBlocks(Region, ShuffleArray, NumberOfBlocks);
    }

    const u16 PopCount =
        popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
    DCHECK_NE(PopCount, 0U);

    // Note that `PushedBlocks` and `PoppedBlocks` are supposed to only record
    // the requests from `pushBlocks()` and `popBlocks()`, which are the
    // external interfaces. `populateFreeListAndPopBlocks()` is an internal
    // interface, so we have to set the value back to avoid incorrectly
    // updating the stats.
    Region->FreeListInfo.PushedBlocks -= NumberOfBlocks;

    const uptr AllocatedUser = Size * NumberOfBlocks;
    C->getStats().add(StatFree, AllocatedUser);
    Region->MemMapInfo.AllocatedUser += AllocatedUser;

    return PopCount;
  }

  void getStats(ScopedString *Str, uptr ClassId, RegionInfo *Region)
      REQUIRES(Region->MMLock, Region->FLLock) {
    if (Region->MemMapInfo.MappedUser == 0)
      return;
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr InUseBlocks =
        Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
    const uptr BytesInFreeList =
        Region->MemMapInfo.AllocatedUser - InUseBlocks * BlockSize;
    uptr RegionPushedBytesDelta = 0;
    if (BytesInFreeList >=
        Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
      RegionPushedBytesDelta =
          BytesInFreeList - Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
    }
    const uptr TotalChunks = Region->MemMapInfo.AllocatedUser / BlockSize;
    Str->append(
        "%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
        "inuse: %6zu total: %6zu releases: %6zu last "
        "released: %6zuK latest pushed bytes: %6zuK region: 0x%zx (0x%zx)\n",
"E" : " ", ClassId, getSizeByClassId(ClassId), 114206c3fb27SDimitry Andric Region->MemMapInfo.MappedUser >> 10, Region->FreeListInfo.PoppedBlocks, 11435f757f3fSDimitry Andric Region->FreeListInfo.PushedBlocks, InUseBlocks, TotalChunks, 114406c3fb27SDimitry Andric Region->ReleaseInfo.RangesReleased, 114506c3fb27SDimitry Andric Region->ReleaseInfo.LastReleasedBytes >> 10, 114606c3fb27SDimitry Andric RegionPushedBytesDelta >> 10, Region->RegionBeg, 11470b57cec5SDimitry Andric getRegionBaseByClassId(ClassId)); 11480b57cec5SDimitry Andric } 11490b57cec5SDimitry Andric 11505f757f3fSDimitry Andric void getRegionFragmentationInfo(RegionInfo *Region, uptr ClassId, 11515f757f3fSDimitry Andric ScopedString *Str) REQUIRES(Region->MMLock) { 11525f757f3fSDimitry Andric const uptr BlockSize = getSizeByClassId(ClassId); 11535f757f3fSDimitry Andric const uptr AllocatedUserEnd = 11545f757f3fSDimitry Andric Region->MemMapInfo.AllocatedUser + Region->RegionBeg; 11555f757f3fSDimitry Andric 11565f757f3fSDimitry Andric SinglyLinkedList<BatchGroupT> GroupsToRelease; 11575f757f3fSDimitry Andric { 11585f757f3fSDimitry Andric ScopedLock L(Region->FLLock); 11595f757f3fSDimitry Andric GroupsToRelease = Region->FreeListInfo.BlockList; 11605f757f3fSDimitry Andric Region->FreeListInfo.BlockList.clear(); 11615f757f3fSDimitry Andric } 11625f757f3fSDimitry Andric 11635f757f3fSDimitry Andric FragmentationRecorder Recorder; 11645f757f3fSDimitry Andric if (!GroupsToRelease.empty()) { 11655f757f3fSDimitry Andric PageReleaseContext Context = 11665f757f3fSDimitry Andric markFreeBlocks(Region, BlockSize, AllocatedUserEnd, 11675f757f3fSDimitry Andric getCompactPtrBaseByClassId(ClassId), GroupsToRelease); 11685f757f3fSDimitry Andric auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; }; 11695f757f3fSDimitry Andric releaseFreeMemoryToOS(Context, Recorder, SkipRegion); 11705f757f3fSDimitry Andric 11715f757f3fSDimitry Andric mergeGroupsToReleaseBack(Region, GroupsToRelease); 11725f757f3fSDimitry Andric } 11735f757f3fSDimitry Andric 11745f757f3fSDimitry Andric ScopedLock L(Region->FLLock); 11755f757f3fSDimitry Andric const uptr PageSize = getPageSizeCached(); 11765f757f3fSDimitry Andric const uptr TotalBlocks = Region->MemMapInfo.AllocatedUser / BlockSize; 11775f757f3fSDimitry Andric const uptr InUseBlocks = 11785f757f3fSDimitry Andric Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks; 11795f757f3fSDimitry Andric const uptr AllocatedPagesCount = 11805f757f3fSDimitry Andric roundUp(Region->MemMapInfo.AllocatedUser, PageSize) / PageSize; 11815f757f3fSDimitry Andric DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount()); 11825f757f3fSDimitry Andric const uptr InUsePages = 11835f757f3fSDimitry Andric AllocatedPagesCount - Recorder.getReleasedPagesCount(); 11845f757f3fSDimitry Andric const uptr InUseBytes = InUsePages * PageSize; 11855f757f3fSDimitry Andric 11865f757f3fSDimitry Andric uptr Integral; 11875f757f3fSDimitry Andric uptr Fractional; 11885f757f3fSDimitry Andric computePercentage(BlockSize * InUseBlocks, InUsePages * PageSize, &Integral, 11895f757f3fSDimitry Andric &Fractional); 11905f757f3fSDimitry Andric Str->append(" %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total " 11915f757f3fSDimitry Andric "pages: %6zu/%6zu inuse bytes: %6zuK util: %3zu.%02zu%%\n", 11925f757f3fSDimitry Andric ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages, 11935f757f3fSDimitry Andric AllocatedPagesCount, InUseBytes >> 10, Integral, Fractional); 11945f757f3fSDimitry Andric } 11955f757f3fSDimitry Andric 
  NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
                                 ReleaseToOS ReleaseType = ReleaseToOS::Normal)
      REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
    const uptr BlockSize = getSizeByClassId(ClassId);
    uptr BytesInFreeList;
    const uptr AllocatedUserEnd =
        Region->MemMapInfo.AllocatedUser + Region->RegionBeg;
    SinglyLinkedList<BatchGroupT> GroupsToRelease;

    {
      ScopedLock L(Region->FLLock);

      BytesInFreeList = Region->MemMapInfo.AllocatedUser -
                        (Region->FreeListInfo.PoppedBlocks -
                         Region->FreeListInfo.PushedBlocks) *
                            BlockSize;
      if (UNLIKELY(BytesInFreeList == 0))
        return 0;

      // ==================================================================== //
      // 1. Check if we have enough free blocks and if it's worth doing a page
      //    release.
      // ==================================================================== //
      if (ReleaseType != ReleaseToOS::ForceAll &&
          !hasChanceToReleasePages(Region, BlockSize, BytesInFreeList,
                                   ReleaseType)) {
        return 0;
      }

      // ==================================================================== //
      // 2. Determine which groups can release the pages. Use a heuristic to
      //    gather groups that are candidates for doing a release.
      // ==================================================================== //
      if (ReleaseType == ReleaseToOS::ForceAll) {
        GroupsToRelease = Region->FreeListInfo.BlockList;
        Region->FreeListInfo.BlockList.clear();
      } else {
        GroupsToRelease =
            collectGroupsToRelease(Region, BlockSize, AllocatedUserEnd,
                                   getCompactPtrBaseByClassId(ClassId));
      }
      if (GroupsToRelease.empty())
        return 0;
    }

    // Note that we have extracted the `GroupsToRelease` from the region
    // freelist. It's safe to let pushBlocks()/popBlocks() access the remaining
    // region freelist. In steps 3 and 4, we will temporarily release the
    // FLLock and lock it again before step 5.

    // ==================================================================== //
    // 3. Mark the free blocks in `GroupsToRelease` in the
    //    `PageReleaseContext`. Then we can tell which pages are in-use by
    //    querying `PageReleaseContext`.
    // ==================================================================== //
    PageReleaseContext Context =
        markFreeBlocks(Region, BlockSize, AllocatedUserEnd,
                       getCompactPtrBaseByClassId(ClassId), GroupsToRelease);
    if (UNLIKELY(!Context.hasBlockMarked())) {
      mergeGroupsToReleaseBack(Region, GroupsToRelease);
      return 0;
    }

    // ==================================================================== //
    // 4. Release the unused physical pages back to the OS.
    // ==================================================================== //
    RegionReleaseRecorder<MemMapT> Recorder(&Region->MemMapInfo.MemMap,
                                            Region->RegionBeg,
                                            Context.getReleaseOffset());
    auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
    releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
    if (Recorder.getReleasedRangesCount() > 0) {
      Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
      Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
      Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
    }
    Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();

    // ====================================================================== //
    // 5. Merge the `GroupsToRelease` back to the freelist.
    // ====================================================================== //
    mergeGroupsToReleaseBack(Region, GroupsToRelease);

    return Recorder.getReleasedBytes();
  }

  bool hasChanceToReleasePages(RegionInfo *Region, uptr BlockSize,
                               uptr BytesInFreeList, ReleaseToOS ReleaseType)
      REQUIRES(Region->MMLock, Region->FLLock) {
    DCHECK_GE(Region->FreeListInfo.PoppedBlocks,
              Region->FreeListInfo.PushedBlocks);
    const uptr PageSize = getPageSizeCached();

    // Always update `BytesInFreeListAtLastCheckpoint` with the smallest value
    // so that we won't underestimate the releasable pages.
    // For example, the following shows the region usage,
    //
    //   BytesInFreeListAtLastCheckpoint       AllocatedUser
    //                  v                            v
    //  |--------------------------------------->
    //         ^                     ^
    //  BytesInFreeList       ReleaseThreshold
    //
    // In general, if we have collected enough bytes and the amount of free
    // bytes meets the ReleaseThreshold, we will try to do the page release.
    // If we don't update `BytesInFreeListAtLastCheckpoint` when the current
    // `BytesInFreeList` is smaller, it may take a longer time to collect
    // enough freed blocks because we miss the bytes between
    // (BytesInFreeListAtLastCheckpoint - BytesInFreeList).
    if (BytesInFreeList <=
        Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
      Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
    }

    const uptr RegionPushedBytesDelta =
        BytesInFreeList - Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
    if (RegionPushedBytesDelta < PageSize)
      return false;

    // Releasing smaller blocks is expensive, so we want to make sure that a
    // significant amount of bytes are free, and that there has been a good
    // amount of batches pushed to the freelist, before attempting to release.
    if (isSmallBlock(BlockSize) && ReleaseType == ReleaseToOS::Normal)
      if (RegionPushedBytesDelta < Region->TryReleaseThreshold)
        return false;

    if (ReleaseType == ReleaseToOS::Normal) {
      const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
      if (IntervalMs < 0)
        return false;

      // The constant 8 here is selected from profiling some apps, where the
      // number of unreleased pages in the large size classes is around 16
      // pages or more. Choose half of it as a heuristic, which also avoids a
      // page release for every single pushBlocks() attempt by large blocks.
      const bool ByPassReleaseInterval =
          isLargeBlock(BlockSize) && RegionPushedBytesDelta > 8 * PageSize;
      if (!ByPassReleaseInterval) {
        if (Region->ReleaseInfo.LastReleaseAtNs +
                static_cast<u64>(IntervalMs) * 1000000 >
            getMonotonicTimeFast()) {
          // Memory was returned recently.
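          //
          // For example (illustrative numbers): with IntervalMs = 1000, a
          // release attempt within 10^9 ns (one second) of the previous one
          // is skipped, unless a large-block region has pushed more than
          // 8 pages since the last checkpoint.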
          return false;
        }
      }
    } // if (ReleaseType == ReleaseToOS::Normal)

    return true;
  }

  SinglyLinkedList<BatchGroupT>
  collectGroupsToRelease(RegionInfo *Region, const uptr BlockSize,
                         const uptr AllocatedUserEnd, const uptr CompactPtrBase)
      REQUIRES(Region->MMLock, Region->FLLock) {
    const uptr GroupSize = (1UL << GroupSizeLog);
    const uptr PageSize = getPageSizeCached();
    SinglyLinkedList<BatchGroupT> GroupsToRelease;

    // We are examining each group and will take the minimum distance to the
    // release threshold as the next `Region::TryReleaseThreshold`. Note that
    // if the size of free blocks has reached the release threshold, the
    // distance to the next release will be
    // PageSize * SmallerBlockReleasePageDelta. See the comment on
    // `SmallerBlockReleasePageDelta` for more details.
    uptr MinDistToThreshold = GroupSize;

    for (BatchGroupT *BG = Region->FreeListInfo.BlockList.front(),
                     *Prev = nullptr;
         BG != nullptr;) {
      // A group boundary is always GroupSize-aligned from the CompactPtr base.
      // The layout of memory groups is like,
      //
      //     (CompactPtrBase)
      //         #1 CompactPtrGroupBase   #2 CompactPtrGroupBase        ...
      //           |                       |                       |
      //           v                       v                       v
      //           +-----------------------+-----------------------+
      //            \                     / \                     /
      //             ---   GroupSize   ---   ---   GroupSize   ---
      //
      // After decompacting the CompactPtrGroupBase, we expect the alignment
      // property to hold as well.
      const uptr BatchGroupBase =
          decompactGroupBase(CompactPtrBase, BG->CompactPtrGroupBase);
      DCHECK_LE(Region->RegionBeg, BatchGroupBase);
      DCHECK_GE(AllocatedUserEnd, BatchGroupBase);
      DCHECK_EQ((Region->RegionBeg - BatchGroupBase) % GroupSize, 0U);
      // TransferBatches are pushed in front of BG.Batches. The first one may
      // not have all caches used.
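      //
      // For example (illustrative numbers): with 3 TransferBatches in the
      // group, MaxCachedPerBatch = 13 and a front batch holding 5 blocks, the
      // computation below gives NumBlocks = 2 * 13 + 5 = 31.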
      const uptr NumBlocks = (BG->Batches.size() - 1) * BG->MaxCachedPerBatch +
                             BG->Batches.front()->getCount();
      const uptr BytesInBG = NumBlocks * BlockSize;

      if (BytesInBG <= BG->BytesInBGAtLastCheckpoint) {
        BG->BytesInBGAtLastCheckpoint = BytesInBG;
        Prev = BG;
        BG = BG->Next;
        continue;
      }

      const uptr PushedBytesDelta = BytesInBG - BG->BytesInBGAtLastCheckpoint;

      // Given the randomness property, we try to release the pages only if
      // the bytes used by free blocks exceed a certain proportion of the
      // group size. Note that this heuristic only applies when all the spaces
      // in a BatchGroup are allocated.
      if (isSmallBlock(BlockSize)) {
        const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
        const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
                                            ? GroupSize
                                            : AllocatedUserEnd - BatchGroupBase;
        const uptr ReleaseThreshold =
            (AllocatedGroupSize * (100 - 1U - BlockSize / 16U)) / 100U;
        const bool HighDensity = BytesInBG >= ReleaseThreshold;
        const bool MayHaveReleasedAll = NumBlocks >= (GroupSize / BlockSize);
        // If all blocks in the group are released, we will do range marking,
        // which is fast. Otherwise, we will wait until we have accumulated
        // a certain amount of free memory.
        const bool ReachReleaseDelta =
            MayHaveReleasedAll
                ? true
                : PushedBytesDelta >= PageSize * SmallerBlockReleasePageDelta;

        if (!HighDensity) {
          DCHECK_LE(BytesInBG, ReleaseThreshold);
          // The following is the usage of a memory group,
          //
          //     BytesInBG             ReleaseThreshold
          //  /             \                 v
          //  +---+---------------------------+-----+
          //  |   |         |                 |     |
          //  +---+---------------------------+-----+
          //       \        /                       ^
          //    PushedBytesDelta                 GroupEnd
          MinDistToThreshold =
              Min(MinDistToThreshold,
                  ReleaseThreshold - BytesInBG + PushedBytesDelta);
        } else {
          // If it reaches high density at this round, the next time we try
          // to release is based on SmallerBlockReleasePageDelta.
          MinDistToThreshold =
              Min(MinDistToThreshold, PageSize * SmallerBlockReleasePageDelta);
        }

        if (!HighDensity || !ReachReleaseDelta) {
          Prev = BG;
          BG = BG->Next;
          continue;
        }
      }

      // If `BG` is the first BatchGroupT in the list, we only need to advance
      // `BG` and call FreeListInfo.BlockList::pop_front(). No update is needed
      // for `Prev`.
      //
      //         (BG)   (BG->Next)
      // Prev     Cur      BG
      //   |       |        |
      //   v       v        v
      //  nil     +--+     +--+
      //          |X | ->  |  | -> ...
      //          +--+     +--+
      //
      // Otherwise, `Prev` will be used to extract the `Cur` from the
      // `FreeListInfo.BlockList`.
      //
      //         (BG)   (BG->Next)
      // Prev     Cur      BG
      //   |       |        |
      //   v       v        v
      //  +--+    +--+     +--+
      //  |  | -> |X | ->  |  | -> ...
      //  +--+    +--+     +--+
      //
      // After FreeListInfo.BlockList::extract(),
      //
      // Prev     Cur      BG
      //   |       |        |
      //   v       v        v
      //  +--+    +--+     +--+
      //  |  |-+  |X |  +->|  | -> ...
      //  +--+ |  +--+  |  +--+
      //       +--------+
      //
      // Note that we need to advance before pushing this BatchGroup to
      // GroupsToRelease because it's a destructive operation.
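      //
      // A worked example of the density heuristic above (illustrative
      // numbers, assuming a fully allocated group with GroupSize = 256 KiB):
      // for BlockSize = 32, BlockSize / 16U == 2, so ReleaseThreshold =
      // AllocatedGroupSize * 97 / 100, i.e. the group only becomes a release
      // candidate once roughly 97% of its bytes sit in the freelist.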

      BatchGroupT *Cur = BG;
      BG = BG->Next;

      // Ideally, we may want to update this only after a successful release.
      // However, for smaller blocks, each block marking is a costly operation.
      // Therefore, we update it earlier.
      // TODO: Consider updating this after releasing pages if
      // `ReleaseRecorder` can tell the released bytes in each group.
      Cur->BytesInBGAtLastCheckpoint = BytesInBG;

      if (Prev != nullptr)
        Region->FreeListInfo.BlockList.extract(Prev, Cur);
      else
        Region->FreeListInfo.BlockList.pop_front();
      GroupsToRelease.push_back(Cur);
    }

    // Only small blocks have the adaptive `TryReleaseThreshold`.
    if (isSmallBlock(BlockSize)) {
      // If the MinDistToThreshold is not updated, that means each memory group
      // may have pushed less than a page size. In that case, just set it back
      // to normal.
      if (MinDistToThreshold == GroupSize)
        MinDistToThreshold = PageSize * SmallerBlockReleasePageDelta;
      Region->TryReleaseThreshold = MinDistToThreshold;
    }

    return GroupsToRelease;
  }

  PageReleaseContext
  markFreeBlocks(RegionInfo *Region, const uptr BlockSize,
                 const uptr AllocatedUserEnd, const uptr CompactPtrBase,
                 SinglyLinkedList<BatchGroupT> &GroupsToRelease)
      REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
    const uptr GroupSize = (1UL << GroupSizeLog);
    auto DecompactPtr = [CompactPtrBase](CompactPtrT CompactPtr) {
      return decompactPtrInternal(CompactPtrBase, CompactPtr);
    };

    const uptr ReleaseBase = decompactGroupBase(
        CompactPtrBase, GroupsToRelease.front()->CompactPtrGroupBase);
    const uptr LastGroupEnd =
        Min(decompactGroupBase(CompactPtrBase,
                               GroupsToRelease.back()->CompactPtrGroupBase) +
                GroupSize,
            AllocatedUserEnd);
    // The last block may straddle the group boundary. Round up to BlockSize
    // to get the exact range.
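    //
    // For example (illustrative numbers): with a RegionBeg-relative
    // LastGroupEnd of 1000 and BlockSize = 48, roundUpSlow(1000, 48) = 1008,
    // so the tail of the straddling block is included in the release range.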
    const uptr ReleaseEnd =
        roundUpSlow(LastGroupEnd - Region->RegionBeg, BlockSize) +
        Region->RegionBeg;
    const uptr ReleaseRangeSize = ReleaseEnd - ReleaseBase;
    const uptr ReleaseOffset = ReleaseBase - Region->RegionBeg;

    PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                               ReleaseRangeSize, ReleaseOffset);
    // In the rare case that the PageMap allocation fails, we won't be able to
    // do the page release.
    if (UNLIKELY(!Context.ensurePageMapAllocated()))
      return Context;

    for (BatchGroupT &BG : GroupsToRelease) {
      const uptr BatchGroupBase =
          decompactGroupBase(CompactPtrBase, BG.CompactPtrGroupBase);
      const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
      const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
                                          ? GroupSize
                                          : AllocatedUserEnd - BatchGroupBase;
      const uptr BatchGroupUsedEnd = BatchGroupBase + AllocatedGroupSize;
      const bool MayContainLastBlockInRegion =
          BatchGroupUsedEnd == AllocatedUserEnd;
      const bool BlockAlignedWithUsedEnd =
          (BatchGroupUsedEnd - Region->RegionBeg) % BlockSize == 0;

      uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
      if (!BlockAlignedWithUsedEnd)
        ++MaxContainedBlocks;

      const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
                             BG.Batches.front()->getCount();

      if (NumBlocks == MaxContainedBlocks) {
        for (const auto &It : BG.Batches) {
          if (&It != BG.Batches.front())
            DCHECK_EQ(It.getCount(), BG.MaxCachedPerBatch);
          for (u16 I = 0; I < It.getCount(); ++I)
            DCHECK_EQ(compactPtrGroup(It.get(I)), BG.CompactPtrGroupBase);
        }

        Context.markRangeAsAllCounted(BatchGroupBase, BatchGroupUsedEnd,
                                      Region->RegionBeg, /*RegionIndex=*/0,
                                      Region->MemMapInfo.AllocatedUser);
      } else {
        DCHECK_LT(NumBlocks, MaxContainedBlocks);
        // Note that we don't always visit blocks in each BatchGroup, so we
        // may miss the chance of releasing certain pages that cross
        // BatchGroups.
        Context.markFreeBlocksInRegion(
            BG.Batches, DecompactPtr, Region->RegionBeg, /*RegionIndex=*/0,
            Region->MemMapInfo.AllocatedUser, MayContainLastBlockInRegion);
      }
    }

    DCHECK(Context.hasBlockMarked());

    return Context;
  }

  void mergeGroupsToReleaseBack(RegionInfo *Region,
                                SinglyLinkedList<BatchGroupT> &GroupsToRelease)
      REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
    ScopedLock L(Region->FLLock);

    // After merging two freelists, we may have redundant `BatchGroup`s that
    // need to be recycled. The number of unused `BatchGroup`s is expected to
    // be small. Pick a constant which is inferred from real programs.
    constexpr uptr MaxUnusedSize = 8;
    CompactPtrT Blocks[MaxUnusedSize];
    u32 Idx = 0;
    RegionInfo *BatchClassRegion = getRegionInfo(SizeClassMap::BatchClassId);
    // We can't call pushBatchClassBlocks() to recycle the unused `BatchGroup`s
    // when we are manipulating the freelist of `BatchClassRegion`. Instead, we
    // should just push them back to the freelist when we merge two
    // `BatchGroup`s. This logic hasn't been implemented because we haven't
    // supported releasing pages in `BatchClassRegion`.
    DCHECK_NE(BatchClassRegion, Region);

    // Merge GroupsToRelease back into the Region::FreeListInfo.BlockList. Note
    // that both `Region->FreeListInfo.BlockList` and `GroupsToRelease` are
    // sorted.
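    //
    // In outline, the loop below is a standard merge of two sorted
    // singly-linked lists keyed on CompactPtrGroupBase, with equal keys
    // folded into one group (pseudo-code, for illustration only):
    //
    //   while (BG != nullptr && !GroupsToRelease.empty()) {
    //     if (BG->key < front->key)       { advance BG; }
    //     else if (BG->key == front->key) { fold front into BG and recycle
    //                                       the now-unused metadata blocks; }
    //     else                            { insert front before BG; }
    //   }
    //   // Then append whatever remains in GroupsToRelease.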
    for (BatchGroupT *BG = Region->FreeListInfo.BlockList.front(),
                     *Prev = nullptr;
         ;) {
      if (BG == nullptr || GroupsToRelease.empty()) {
        if (!GroupsToRelease.empty())
          Region->FreeListInfo.BlockList.append_back(&GroupsToRelease);
        break;
      }

      DCHECK(!BG->Batches.empty());

      if (BG->CompactPtrGroupBase <
          GroupsToRelease.front()->CompactPtrGroupBase) {
        Prev = BG;
        BG = BG->Next;
        continue;
      }

      BatchGroupT *Cur = GroupsToRelease.front();
      TransferBatchT *UnusedTransferBatch = nullptr;
      GroupsToRelease.pop_front();

      if (BG->CompactPtrGroupBase == Cur->CompactPtrGroupBase) {
        BG->PushedBlocks += Cur->PushedBlocks;
        // We have updated `BatchGroup::BytesInBGAtLastCheckpoint` while
        // collecting the `GroupsToRelease`.
        BG->BytesInBGAtLastCheckpoint = Cur->BytesInBGAtLastCheckpoint;
        const uptr MaxCachedPerBatch = BG->MaxCachedPerBatch;

        // Note that the first TransferBatches in both `Batches` may not be
        // full and only the first TransferBatch can have non-full blocks. Thus
        // we have to merge them before appending one to another.
        if (Cur->Batches.front()->getCount() == MaxCachedPerBatch) {
          BG->Batches.append_back(&Cur->Batches);
        } else {
          TransferBatchT *NonFullBatch = Cur->Batches.front();
          Cur->Batches.pop_front();
          const u16 NonFullBatchCount = NonFullBatch->getCount();
          // The remaining Batches in `Cur` are full.
          BG->Batches.append_back(&Cur->Batches);

          if (BG->Batches.front()->getCount() == MaxCachedPerBatch) {
            // Only 1 non-full TransferBatch, push it to the front.
            BG->Batches.push_front(NonFullBatch);
          } else {
            const u16 NumBlocksToMove = static_cast<u16>(
                Min(static_cast<u16>(MaxCachedPerBatch -
                                     BG->Batches.front()->getCount()),
                    NonFullBatchCount));
            BG->Batches.front()->appendFromTransferBatch(NonFullBatch,
                                                         NumBlocksToMove);
            if (NonFullBatch->isEmpty())
              UnusedTransferBatch = NonFullBatch;
            else
              BG->Batches.push_front(NonFullBatch);
          }
        }

        const u32 NeededSlots = UnusedTransferBatch == nullptr ? 1U : 2U;
        if (UNLIKELY(Idx + NeededSlots > MaxUnusedSize)) {
          ScopedLock L(BatchClassRegion->FLLock);
          pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
          if (conditionVariableEnabled())
            BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
          Idx = 0;
        }
        Blocks[Idx++] =
            compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(Cur));
        if (UnusedTransferBatch) {
          Blocks[Idx++] =
              compactPtr(SizeClassMap::BatchClassId,
                         reinterpret_cast<uptr>(UnusedTransferBatch));
        }
        Prev = BG;
        BG = BG->Next;
        continue;
      }

      // At this point, `BG` is the first BatchGroup with a CompactPtrGroupBase
      // larger than the first element in `GroupsToRelease`. We need to insert
      // `GroupsToRelease::front()` (which is `Cur` below) before `BG`.
      //
      //   1. If `Prev` is nullptr, we simply push `Cur` to the front of
      //      FreeListInfo.BlockList.
      //   2. Otherwise, use `insert()` which inserts an element next to
      //      `Prev`.
      //
      // Afterwards, we don't need to advance `BG` because the order between
      // `BG` and the new `GroupsToRelease::front()` hasn't been checked.
      if (Prev == nullptr)
        Region->FreeListInfo.BlockList.push_front(Cur);
      else
        Region->FreeListInfo.BlockList.insert(Prev, Cur);
      DCHECK_EQ(Cur->Next, BG);
      Prev = Cur;
    }

    if (Idx != 0) {
      ScopedLock L(BatchClassRegion->FLLock);
      pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
      if (conditionVariableEnabled())
        BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
    }

    if (SCUDO_DEBUG) {
      BatchGroupT *Prev = Region->FreeListInfo.BlockList.front();
      for (BatchGroupT *Cur = Prev->Next; Cur != nullptr;
           Prev = Cur, Cur = Cur->Next) {
        CHECK_LT(Prev->CompactPtrGroupBase, Cur->CompactPtrGroupBase);
      }
    }

    if (conditionVariableEnabled())
      Region->FLLockCV.notifyAll(Region->FLLock);
  }

  // The minimum size of pushed blocks for which we will try to release the
  // pages in that size class.
  uptr SmallerBlockReleasePageDelta = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};
  alignas(SCUDO_CACHE_LINE_SIZE) RegionInfo RegionInfoArray[NumClasses];
};

} // namespace scudo

#endif // SCUDO_PRIMARY64_H_