//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "chunk.h"
#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.
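//
// A rough picture of a block as carved out by MapAllocator::allocate() below
// (a sketch; exact offsets depend on alignment and rounding):
//
//   MapBase                                                        MapEnd
//   +------------+------------------------- ... ------+------------+
//   | guard page |           committed pages          | guard page |
//   +------------+------------------------- ... ------+------------+
//                ^ CommitBase        ^ HeaderPos  ^ AllocPos (user data)
//
// A minimal usage sketch, for illustration only (`Config' and `Options' stand
// in for a real allocator configuration and its runtime options):
//
//   MapAllocator<Config> Secondary;
//   Secondary.init(/*S=*/nullptr);
//   void *P = Secondary.allocate(Options, 1UL << 20);
//   // ... use the 1 MiB block ...
//   Secondary.deallocate(Options, P);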

namespace LargeBlock {

struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1,
                         1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  MemMapT MemMap;
};

static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");

constexpr uptr getHeaderSize() { return sizeof(Header); }

template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
  return Ptr;
}

template <typename Config> static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
}

template <typename Config> static Header *getHeader(const void *Ptr) {
  return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock

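// For a pointer P returned by MapAllocator<Config>::allocate() (defined
// below), the matching header can be recovered with the helpers above, e.g.
// (illustration only):
//
//   LargeBlock::Header *H = LargeBlock::getHeader<Config>(P);
//   const uptr BlockEnd = H->CommitBase + H->CommitSize;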

static inline void unmap(LargeBlock::Header *H) {
  // Note that `H->MemMap` is stored on the pages it manages itself. Take over
  // the ownership before unmap() so that any operation along with unmap()
  // won't touch inaccessible pages.
  MemMapT MemMap = H->MemMap;
  MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
}

namespace {
struct CachedBlock {
  uptr CommitBase = 0;
  uptr CommitSize = 0;
  uptr BlockBegin = 0;
  MemMapT MemMap = {};
  u64 Time = 0;

  bool isValid() { return CommitBase != 0; }

  void invalidate() { CommitBase = 0; }
};
} // namespace

template <typename Config> class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
                UNUSED uptr HeadersSize, UNUSED LargeBlock::Header **H,
                UNUSED bool *Zeroed) {
    return false;
  }
  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void disableMemoryTagging() {}
  void unmapTestOnly() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void getStats(UNUSED ScopedString *Str) {
    Str->append("Secondary Cache Disabled\n");
  }
};

static const uptr MaxUnusedCachePages = 4U;

template <typename Config>
bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
                  uptr AllocPos, uptr Flags, MemMapT &MemMap) {
  Flags |= MAP_RESIZABLE;
  Flags |= MAP_ALLOWNOMEM;

  const uptr PageSize = getPageSizeCached();
  if (SCUDO_TRUSTY) {
    /*
     * On Trusty we need AllocPos to be usable for shared memory, which cannot
     * cross multiple mappings. This means we need to split around AllocPos
     * and not over it. We can only do this if the address is page-aligned.
     */
    const uptr TaggedSize = AllocPos - CommitBase;
    if (useMemoryTagging<Config>(Options) && isAligned(TaggedSize, PageSize)) {
      DCHECK_GT(TaggedSize, 0);
      return MemMap.remap(CommitBase, TaggedSize, "scudo:secondary",
                          MAP_MEMTAG | Flags) &&
             MemMap.remap(AllocPos, CommitSize - TaggedSize, "scudo:secondary",
                          Flags);
    } else {
      const uptr RemapFlags =
          (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
      return MemMap.remap(CommitBase, CommitSize, "scudo:secondary",
                          RemapFlags);
    }
  }

  const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * PageSize;
  if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
    const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
    return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
                        MAP_MEMTAG | Flags) &&
           MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
                        "scudo:secondary", Flags);
  } else {
    const uptr RemapFlags =
        (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
    return MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
  }
}

// Template specialization to avoid producing zero-length array
template <typename T, size_t Size> class NonZeroLengthArray {
public:
  T &operator[](uptr Idx) { return values[Idx]; }

private:
  T values[Size];
};
template <typename T> class NonZeroLengthArray<T, 0> {
public:
  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
};

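// Cache of recently freed Secondary blocks. Blocks are kept fully mapped so
// that later allocations can reuse them and skip the map/unmap syscalls; the
// most recently stored block always sits in Entries[0]. When memory tagging
// is enabled, freed blocks first rotate through a small quarantine ring
// before becoming eligible for reuse, and entries unused for longer than
// ReleaseToOsIntervalMs get their backing pages released to the OS.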
template <typename Config> class MapAllocatorCache {
public:
  void getStats(ScopedString *Str) {
    ScopedLock L(Mutex);
    uptr Integral;
    uptr Fractional;
    computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
                      &Fractional);
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    Str->append(
        "Stats: MapAllocatorCache: EntriesCount: %d, "
        "MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsIntervalMs = %d\n",
        EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
        atomic_load_relaxed(&MaxEntrySize), Interval >= 0 ? Interval : -1);
    Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                "(%zu.%02zu%%)\n",
                SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
    for (CachedBlock Entry : Entries) {
      if (!Entry.isValid())
        continue;
      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                  "BlockSize: %zu %s\n",
                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
    }
  }

  // Ensure the default maximum specified fits the array.
  static_assert(Config::getDefaultMaxEntriesCount() <=
                    Config::getEntriesArraySize(),
                "");

  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(EntriesCount, 0U);
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(Config::getDefaultMaxEntriesCount()));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(Config::getDefaultMaxEntrySize()));
    // The default value in the cache config takes priority.
    if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
      ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

  void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
    if (!canCache(H->CommitSize))
      return unmap(H);

    bool EntryCached = false;
    bool EmptyCache = false;
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    const u64 Time = getMonotonicTimeFast();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    CachedBlock Entry;
    Entry.CommitBase = H->CommitBase;
    Entry.CommitSize = H->CommitSize;
    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
    Entry.MemMap = H->MemMap;
    Entry.Time = Time;
    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
        // Fuchsia does not support replacing mappings by creating a new mapping
        // on top so we just do the two syscalls there.
        Entry.Time = 0;
        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
                             Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
      } else {
        Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                         MAP_NOACCESS);
      }
    } else if (Interval == 0) {
      Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
      Entry.Time = 0;
    }
    do {
      ScopedLock L(Mutex);
      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry into
        // the quarantine or the cache because the permissions would be wrong so
        // just unmap it.
        break;
      }
      if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
        QuarantinePos =
            (QuarantinePos + 1) % Max(Config::getQuarantineSize(), 1u);
        if (!Quarantine[QuarantinePos].isValid()) {
          Quarantine[QuarantinePos] = Entry;
          return;
        }
        CachedBlock PrevEntry = Quarantine[QuarantinePos];
        Quarantine[QuarantinePos] = Entry;
        if (OldestTime == 0)
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }
      if (EntriesCount >= MaxCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
        for (u32 I = 0; I < MaxCount; I++) {
          if (Entries[I].isValid())
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0] = Entry;
          EntriesCount++;
          if (OldestTime == 0)
            OldestTime = Entry.Time;
          EntryCached = true;
          break;
        }
      }
    } while (0);
    if (EmptyCache)
      empty();
    else if (Interval >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    if (!EntryCached)
      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
  }

  bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
                LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
    const uptr PageSize = getPageSizeCached();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    // 10% of the requested size proved to be the optimal choice for
    // retrieving cached blocks after testing several options.
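    // For example (illustrative numbers): a request whose header would land
    // 64 KiB past the start of a 1 MiB cached block leaves ~960 KiB in use,
    // tolerating up to ~96 KiB of leading fragmentation, so that block is
    // used immediately.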
    constexpr u32 FragmentedBytesDivisor = 10;
    bool Found = false;
    CachedBlock Entry;
    uptr EntryHeaderPos = 0;
    {
      ScopedLock L(Mutex);
      CallsToRetrieve++;
      if (EntriesCount == 0)
        return false;
      u32 OptimalFitIndex = 0;
      uptr MinDiff = UINTPTR_MAX;
      for (u32 I = 0; I < MaxCount; I++) {
        if (!Entries[I].isValid())
          continue;
        const uptr CommitBase = Entries[I].CommitBase;
        const uptr CommitSize = Entries[I].CommitSize;
        const uptr AllocPos =
            roundDown(CommitBase + CommitSize - Size, Alignment);
        const uptr HeaderPos = AllocPos - HeadersSize;
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        if (HeaderPos < CommitBase ||
            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
          continue;
        }
        Found = true;
        const uptr Diff = HeaderPos - CommitBase;
        // Immediately use a cached block if its size is close enough to the
        // requested size.
        const uptr MaxAllowedFragmentedBytes =
            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
        if (Diff <= MaxAllowedFragmentedBytes) {
          OptimalFitIndex = I;
          EntryHeaderPos = HeaderPos;
          break;
        }
        // Keep track of the smallest cached block
        // that is greater than (AllocSize + HeaderSize).
        if (Diff > MinDiff)
          continue;
        OptimalFitIndex = I;
        MinDiff = Diff;
        EntryHeaderPos = HeaderPos;
      }
      if (Found) {
        Entry = Entries[OptimalFitIndex];
        Entries[OptimalFitIndex].invalidate();
        EntriesCount--;
        SuccessfulRetrieves++;
      }
    }
    if (!Found)
      return false;

    *H = reinterpret_cast<LargeBlock::Header *>(
        LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
    *Zeroed = Entry.Time == 0;
    if (useMemoryTagging<Config>(Options))
      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
    uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
    if (useMemoryTagging<Config>(Options)) {
      if (*Zeroed) {
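        // The pages were released back to the OS, which also zeroed their
        // memory tags, so the range from the (tagged) commit base up to the
        // new block begin has to be retagged.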
        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
                  NewBlockBegin);
      } else if (Entry.BlockBegin < NewBlockBegin) {
        storeTags(Entry.BlockBegin, NewBlockBegin);
      } else {
        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
      }
    }
    (*H)->CommitBase = Entry.CommitBase;
    (*H)->CommitSize = Entry.CommitSize;
    (*H)->MemMap = Entry.MemMap;
    return true;
  }

  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()),
          Config::getMinReleaseToOsIntervalMs());
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    if (O == Option::MaxCacheEntriesCount) {
      if (Value < 0)
        return false;
      atomic_store_relaxed(
          &MaxEntriesCount,
          Min<u32>(static_cast<u32>(Value), Config::getEntriesArraySize()));
      return true;
    }
    if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

  void disableMemoryTagging() EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
      if (Quarantine[I].isValid()) {
        MemMapT &MemMap = Quarantine[I].MemMap;
        MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
        Quarantine[I].invalidate();
      }
    }
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    for (u32 I = 0; I < MaxCount; I++) {
      if (Entries[I].isValid()) {
        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
                                              Entries[I].CommitSize, 0);
      }
    }
    QuarantinePos = -1U;
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }

  void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }

  void unmapTestOnly() { empty(); }

private:
  void empty() {
    MemMapT MapInfo[Config::getEntriesArraySize()];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
        if (!Entries[I].isValid())
          continue;
        MapInfo[N] = Entries[I].MemMap;
        Entries[I].invalidate();
        N++;
      }
      EntriesCount = 0;
      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++) {
      MemMapT &MemMap = MapInfo[I];
      MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    }
  }

  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
    if (!Entry.isValid() || !Entry.Time)
      return;
    if (Entry.Time > Time) {
      if (OldestTime == 0 || Entry.Time < OldestTime)
        OldestTime = Entry.Time;
      return;
    }
    Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
    Entry.Time = 0;
  }

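  // Walks both the quarantine and the cache, releasing the pages of every
  // entry whose timestamp is older than Time. OldestTime, maintained by
  // releaseIfOlderThan() above, lets us skip the scan entirely when no entry
  // can be old enough.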
  void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
      return;
    OldestTime = 0;
    for (uptr I = 0; I < Config::getQuarantineSize(); I++)
      releaseIfOlderThan(Quarantine[I], Time);
    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
      releaseIfOlderThan(Entries[I], Time);
  }

  HybridMutex Mutex;
  u32 EntriesCount GUARDED_BY(Mutex) = 0;
  u32 QuarantinePos GUARDED_BY(Mutex) = 0;
  atomic_u32 MaxEntriesCount = {};
  atomic_uptr MaxEntrySize = {};
  u64 OldestTime GUARDED_BY(Mutex) = 0;
  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};
  u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
  u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;

  CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
  NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
      Quarantine GUARDED_BY(Mutex) = {};
};

template <typename Config> class MapAllocator {
public:
  void init(GlobalStats *S,
            s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(AllocatedBytes, 0U);
    DCHECK_EQ(FreedBytes, 0U);
    Cache.init(ReleaseToOsInterval);
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
  }

  void *allocate(const Options &Options, uptr Size, uptr AlignmentHint = 0,
                 uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(const Options &Options, void *Ptr);

  static uptr getBlockEnd(void *Ptr) {
    auto *B = LargeBlock::getHeader<Config>(Ptr);
    return B->CommitBase + B->CommitSize;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  static constexpr uptr getHeadersSize() {
    return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    Cache.disable();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    Cache.enable();
    Mutex.unlock();
  }

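  // Callers must hold Mutex for the duration of the iteration, i.e. bracket
  // the call with disable()/enable(); assertHeld() below enforces this.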
  template <typename F> void iterateOverBlocks(F Callback) const {
    Mutex.assertHeld();

    for (const auto &H : InUseBlocks) {
      uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
      if (allocatorSupportsMemoryTagging<Config>())
        Ptr = untagPointer(Ptr);
      Callback(Ptr);
    }
  }

  bool canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

  void disableMemoryTagging() { Cache.disableMemoryTagging(); }

  void unmapTestOnly() { Cache.unmapTestOnly(); }

  void getStats(ScopedString *Str);

private:
  typename Config::template CacheT<typename Config::CacheConfig> Cache;

  mutable HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
  uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
  uptr FreedBytes GUARDED_BY(Mutex) = 0;
  uptr FragmentedBytes GUARDED_BY(Mutex) = 0;
  uptr LargestSize GUARDED_BY(Mutex) = 0;
  u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
  u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
  LocalStats Stats GUARDED_BY(Mutex);
};

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms,
// requesting an allocation from the Secondary with a large alignment would
// end up wasting VA space (even though we are not committing the whole
// thing), hence the need to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a
// page, the committed memory will amount to something close to
// Size - AlignmentHint (pending rounding and headers).
template <typename Config>
void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
                                     uptr Alignment, uptr *BlockEndPtr,
                                     FillContentsMode FillContents) {
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
  const uptr PageSize = getPageSizeCached();

  // Note that cached blocks may already have an aligned address. Thus we
  // simply pass the required size (`Size` + `getHeadersSize()`) for the cache
  // lookup.
  const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);

  if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
    LargeBlock::Header *H;
    bool Zeroed;
    if (Cache.retrieve(Options, Size, Alignment, getHeadersSize(), &H,
                       &Zeroed)) {
      const uptr BlockEnd = H->CommitBase + H->CommitSize;
      if (BlockEndPtr)
        *BlockEndPtr = BlockEnd;
      uptr HInt = reinterpret_cast<uptr>(H);
      if (allocatorSupportsMemoryTagging<Config>())
        HInt = untagPointer(HInt);
      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
      void *Ptr = reinterpret_cast<void *>(PtrInt);
      if (FillContents && !Zeroed)
        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
               BlockEnd - PtrInt);
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += H->CommitSize;
        FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, H->CommitSize);
        Stats.add(StatMapped, H->MemMap.getCapacity());
      }
      return Ptr;
    }
  }

  uptr RoundedSize =
      roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
  if (Alignment > PageSize)
    RoundedSize += Alignment - PageSize;

  ReservedMemoryT ReservedMemory;
  const uptr MapSize = RoundedSize + 2 * PageSize;
  if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
                                      MAP_ALLOWNOMEM))) {
    return nullptr;
  }

  // Take the entire ownership of the reserved region.
  MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
                                           ReservedMemory.getCapacity());
  uptr MapBase = MemMap.getBase();
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(Alignment >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.
    // the pointer that is returned by the C or C++ allocation APIs) ends up
    // on a page boundary, and our headers will live in the preceding page.
    CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory-wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      MemMap.unmap(MapBase, NewMapBase - MapBase);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd =
        CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
                            MemMap)) {
    MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    return nullptr;
  }
  const uptr HeaderPos = AllocPos - getHeadersSize();
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(HeaderPos));
  if (useMemoryTagging<Config>(Options))
    storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
              reinterpret_cast<uptr>(H + 1));
  H->CommitBase = CommitBase;
  H->CommitSize = CommitSize;
  H->MemMap = MemMap;
  if (BlockEndPtr)
    *BlockEndPtr = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    FragmentedBytes += H->MemMap.getCapacity() - CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
  }
  return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}

template <typename Config>
void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
    EXCLUDES(Mutex) {
  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
  const uptr CommitSize = H->CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    FragmentedBytes -= H->MemMap.getCapacity() - CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MemMap.getCapacity());
  }
  Cache.store(Options, H);
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
  ScopedLock L(Mutex);
  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
              "(%zuK), remains %u (%zuK) max %zuM, Fragmented %zuK\n",
              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20,
              FragmentedBytes >> 10);
  Cache.getStats(Str);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_