//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "chunk.h"
#include "common.h"
#include "list.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.

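// A minimal usage sketch (illustrative only; "Config" stands for whatever
// allocator configuration the frontend would pass in, and is an assumption of
// this example rather than something defined here):
//
//   scudo::MapAllocator<Config> Secondary;
//   Secondary.init(/*S=*/nullptr);      // no GlobalStats linked
//   scudo::Options Opts = {};
//   void *P = Secondary.allocate(Opts, 1 << 20);
//   if (P)
//     Secondary.deallocate(Opts, P);
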
namespace LargeBlock {

struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1,
                         1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  uptr MapBase;
  uptr MapSize;
  [[no_unique_address]] MapPlatformData Data;
};

static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");

constexpr uptr getHeaderSize() { return sizeof(Header); }

template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
  return Ptr;
}

template <typename Config> static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
}

template <typename Config> static Header *getHeader(const void *Ptr) {
  return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock

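// Releases the whole mapping backing a block (header and guard pages
// included), using the platform data saved in the header.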
static void unmap(LargeBlock::Header *H) {
  MapPlatformData Data = H->Data;
  unmap(reinterpret_cast<void *>(H->MapBase), H->MapSize, UNMAP_ALL, &Data);
}

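// A no-op cache, used when the Secondary is configured without one: storing a
// block unmaps it immediately and retrieval always fails.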
class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
                UNUSED LargeBlock::Header **H, UNUSED bool *Zeroed) {
    return false;
  }
  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void disableMemoryTagging() {}
  void unmapTestOnly() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }
};

static const uptr MaxUnusedCachePages = 4U;

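// Commits [CommitBase, CommitBase + CommitSize) of a reserved region. When
// memory tagging is in use and the commit is large enough, the mapping is
// split: the part up to Max(AllocPos, CommitBase + MaxUnusedCachePages pages)
// is created with MAP_MEMTAG, and the remainder is mapped untagged.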
template <typename Config>
void mapSecondary(Options Options, uptr CommitBase, uptr CommitSize,
                  uptr AllocPos, uptr Flags, MapPlatformData *Data) {
  const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
  if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
    const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
    map(reinterpret_cast<void *>(CommitBase), UntaggedPos - CommitBase,
        "scudo:secondary", MAP_RESIZABLE | MAP_MEMTAG | Flags, Data);
    map(reinterpret_cast<void *>(UntaggedPos),
        CommitBase + CommitSize - UntaggedPos, "scudo:secondary",
        MAP_RESIZABLE | Flags, Data);
  } else {
    map(reinterpret_cast<void *>(CommitBase), CommitSize, "scudo:secondary",
        MAP_RESIZABLE | (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) |
            Flags,
        Data);
  }
}

// Template specialization to avoid producing a zero-length array.
template <typename T, size_t Size> class NonZeroLengthArray {
public:
  T &operator[](uptr Idx) { return values[Idx]; }

private:
  T values[Size];
};
template <typename T> class NonZeroLengthArray<T, 0> {
public:
  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
};

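// The MapAllocatorCache keeps recently freed secondary blocks so that they
// can be recycled by retrieve() instead of being unmapped. When memory
// tagging is enabled and Config::SecondaryCacheQuarantineSize is non-zero,
// freed blocks first rotate through a small quarantine (held in the
// NonZeroLengthArray above; a size of 0 selects the empty specialization).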
template <typename Config> class MapAllocatorCache {
public:
  // Ensure the default maximum specified fits the array.
  static_assert(Config::SecondaryCacheDefaultMaxEntriesCount <=
                    Config::SecondaryCacheEntriesArraySize,
                "");

  void init(s32 ReleaseToOsInterval) {
    DCHECK_EQ(EntriesCount, 0U);
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntriesCount));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntrySize));
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

  void store(Options Options, LargeBlock::Header *H) {
    if (!canCache(H->CommitSize))
      return unmap(H);

    bool EntryCached = false;
    bool EmptyCache = false;
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    const u64 Time = getMonotonicTime();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    CachedBlock Entry;
    Entry.CommitBase = H->CommitBase;
    Entry.CommitSize = H->CommitSize;
    Entry.MapBase = H->MapBase;
    Entry.MapSize = H->MapSize;
    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
    Entry.Data = H->Data;
    Entry.Time = Time;
    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
        // Fuchsia does not support replacing mappings by creating a new mapping
        // on top, so we just do the two syscalls there.
        Entry.Time = 0;
        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
                             Entry.CommitBase, MAP_NOACCESS, &Entry.Data);
      } else {
        setMemoryPermission(Entry.CommitBase, Entry.CommitSize, MAP_NOACCESS,
                            &Entry.Data);
      }
    } else if (Interval == 0) {
      releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
      Entry.Time = 0;
    }
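    // The do {} while (0) below only exists to scope the lock and to let the
    // various caching paths break out early; an entry that is still not
    // cached when the loop exits gets unmapped at the end of this function.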
    do {
      ScopedLock L(Mutex);
      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry into
        // the quarantine or the cache because the permissions would be wrong,
        // so just unmap it.
        break;
      }
      if (Config::SecondaryCacheQuarantineSize &&
          useMemoryTagging<Config>(Options)) {
        QuarantinePos =
            (QuarantinePos + 1) % Max(Config::SecondaryCacheQuarantineSize, 1u);
        if (!Quarantine[QuarantinePos].CommitBase) {
          Quarantine[QuarantinePos] = Entry;
          return;
        }
        CachedBlock PrevEntry = Quarantine[QuarantinePos];
        Quarantine[QuarantinePos] = Entry;
        if (OldestTime == 0)
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }
      if (EntriesCount >= MaxCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
        for (u32 I = 0; I < MaxCount; I++) {
          if (Entries[I].CommitBase)
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0] = Entry;
          EntriesCount++;
          if (OldestTime == 0)
            OldestTime = Entry.Time;
          EntryCached = true;
          break;
        }
      }
    } while (0);
    if (EmptyCache)
      empty();
    else if (Interval >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    if (!EntryCached)
      unmap(reinterpret_cast<void *>(Entry.MapBase), Entry.MapSize, UNMAP_ALL,
            &Entry.Data);
  }

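  // Tries to find a cached block that can hold a (Size, Alignment) request.
  // On success, *H points at the header within the reused mapping and *Zeroed
  // indicates that the pages are known to be zero (they were released or
  // remapped while cached), so the caller can skip filling the contents.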
  bool retrieve(Options Options, uptr Size, uptr Alignment,
                LargeBlock::Header **H, bool *Zeroed) {
    const uptr PageSize = getPageSizeCached();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    bool Found = false;
    CachedBlock Entry;
    uptr HeaderPos = 0;
    {
      ScopedLock L(Mutex);
      if (EntriesCount == 0)
        return false;
      for (u32 I = 0; I < MaxCount; I++) {
        const uptr CommitBase = Entries[I].CommitBase;
        if (!CommitBase)
          continue;
        const uptr CommitSize = Entries[I].CommitSize;
        const uptr AllocPos =
            roundDownTo(CommitBase + CommitSize - Size, Alignment);
        HeaderPos =
            AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        if (HeaderPos < CommitBase ||
            AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
          continue;
        Found = true;
        Entry = Entries[I];
        Entries[I].CommitBase = 0;
        break;
      }
    }
    if (Found) {
      *H = reinterpret_cast<LargeBlock::Header *>(
          LargeBlock::addHeaderTag<Config>(HeaderPos));
      *Zeroed = Entry.Time == 0;
      if (useMemoryTagging<Config>(Options))
        setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0, &Entry.Data);
      uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
      if (useMemoryTagging<Config>(Options)) {
        if (*Zeroed)
          storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
                    NewBlockBegin);
        else if (Entry.BlockBegin < NewBlockBegin)
          storeTags(Entry.BlockBegin, NewBlockBegin);
        else
          storeTags(untagPointer(NewBlockBegin),
                    untagPointer(Entry.BlockBegin));
      }
      (*H)->CommitBase = Entry.CommitBase;
      (*H)->CommitSize = Entry.CommitSize;
      (*H)->MapBase = Entry.MapBase;
      (*H)->MapSize = Entry.MapSize;
      (*H)->Data = Entry.Data;
      EntriesCount--;
    }
    return Found;
  }

  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

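  // ReleaseInterval is clamped to the configured min/max interval,
  // MaxCacheEntriesCount is rejected if it exceeds the compile-time array
  // size, MaxCacheEntrySize is stored as given, and any other option is
  // accepted as a no-op.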
  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval =
          Max(Min(static_cast<s32>(Value),
                  Config::SecondaryCacheMaxReleaseToOsIntervalMs),
              Config::SecondaryCacheMinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    if (O == Option::MaxCacheEntriesCount) {
      const u32 MaxCount = static_cast<u32>(Value);
      if (MaxCount > Config::SecondaryCacheEntriesArraySize)
        return false;
      atomic_store_relaxed(&MaxEntriesCount, MaxCount);
      return true;
    }
    if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

  void disableMemoryTagging() {
    ScopedLock L(Mutex);
    for (u32 I = 0; I != Config::SecondaryCacheQuarantineSize; ++I) {
      if (Quarantine[I].CommitBase) {
        unmap(reinterpret_cast<void *>(Quarantine[I].MapBase),
              Quarantine[I].MapSize, UNMAP_ALL, &Quarantine[I].Data);
        Quarantine[I].CommitBase = 0;
      }
    }
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    for (u32 I = 0; I < MaxCount; I++)
      if (Entries[I].CommitBase)
        setMemoryPermission(Entries[I].CommitBase, Entries[I].CommitSize, 0,
                            &Entries[I].Data);
    QuarantinePos = -1U;
  }

  void disable() { Mutex.lock(); }

  void enable() { Mutex.unlock(); }

  void unmapTestOnly() { empty(); }

private:
  void empty() {
    struct {
      void *MapBase;
      uptr MapSize;
      MapPlatformData Data;
    } MapInfo[Config::SecondaryCacheEntriesArraySize];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
      for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++) {
        if (!Entries[I].CommitBase)
          continue;
        MapInfo[N].MapBase = reinterpret_cast<void *>(Entries[I].MapBase);
        MapInfo[N].MapSize = Entries[I].MapSize;
        MapInfo[N].Data = Entries[I].Data;
        Entries[I].CommitBase = 0;
        N++;
      }
      EntriesCount = 0;
      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++)
      unmap(MapInfo[I].MapBase, MapInfo[I].MapSize, UNMAP_ALL,
            &MapInfo[I].Data);
  }

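  // A cached block keeps enough of the original LargeBlock::Header around to
  // rebuild it on retrieval. Time records when the block was stored, and is
  // set to 0 once its pages have been released or made inaccessible.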
  struct CachedBlock {
    uptr CommitBase;
    uptr CommitSize;
    uptr MapBase;
    uptr MapSize;
    uptr BlockBegin;
    [[no_unique_address]] MapPlatformData Data;
    u64 Time;
  };

  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) {
    if (!Entry.CommitBase || !Entry.Time)
      return;
    if (Entry.Time > Time) {
      if (OldestTime == 0 || Entry.Time < OldestTime)
        OldestTime = Entry.Time;
      return;
    }
    releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
    Entry.Time = 0;
  }

  void releaseOlderThan(u64 Time) {
    ScopedLock L(Mutex);
    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
      return;
    OldestTime = 0;
    for (uptr I = 0; I < Config::SecondaryCacheQuarantineSize; I++)
      releaseIfOlderThan(Quarantine[I], Time);
    for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++)
      releaseIfOlderThan(Entries[I], Time);
  }

  HybridMutex Mutex;
  u32 EntriesCount = 0;
  u32 QuarantinePos = 0;
  atomic_u32 MaxEntriesCount = {};
  atomic_uptr MaxEntrySize = {};
  u64 OldestTime = 0;
  u32 IsFullEvents = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};

  CachedBlock Entries[Config::SecondaryCacheEntriesArraySize] = {};
  NonZeroLengthArray<CachedBlock, Config::SecondaryCacheQuarantineSize>
      Quarantine = {};
};

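// The MapAllocator is the Secondary allocator proper: each large allocation
// gets its own mapping surrounded by guard pages, in-use blocks are kept in a
// list so they can be iterated over, and frees are funneled through the
// configured cache (Config::SecondaryCache), which may recycle or unmap them.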
template <typename Config> class MapAllocator {
public:
  void init(GlobalStats *S, s32 ReleaseToOsInterval = -1) {
    DCHECK_EQ(AllocatedBytes, 0U);
    DCHECK_EQ(FreedBytes, 0U);
    Cache.init(ReleaseToOsInterval);
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
  }

  void *allocate(Options Options, uptr Size, uptr AlignmentHint = 0,
                 uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(Options Options, void *Ptr);

  static uptr getBlockEnd(void *Ptr) {
    auto *B = LargeBlock::getHeader<Config>(Ptr);
    return B->CommitBase + B->CommitSize;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  void getStats(ScopedString *Str) const;

  void disable() {
    Mutex.lock();
    Cache.disable();
  }

  void enable() {
    Cache.enable();
    Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) const {
    for (const auto &H : InUseBlocks) {
      uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
      if (allocatorSupportsMemoryTagging<Config>())
        Ptr = untagPointer(Ptr);
      Callback(Ptr);
    }
  }

  bool canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

  void disableMemoryTagging() { Cache.disableMemoryTagging(); }

  void unmapTestOnly() { Cache.unmapTestOnly(); }

private:
  typename Config::SecondaryCache Cache;

  HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks;
  uptr AllocatedBytes = 0;
  uptr FreedBytes = 0;
  uptr LargestSize = 0;
  u32 NumberOfAllocs = 0;
  u32 NumberOfFrees = 0;
  LocalStats Stats;
};

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms, requesting
// an allocation from the Secondary with a large alignment would end up wasting
// VA space (even though we are not committing the whole thing), hence the need
// to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
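//
// A hypothetical example (illustrative numbers only), assuming 4K pages,
// Size = 20K and Alignment = 16: RoundedSize rounds 20K plus the two header
// sizes up to 24K, the reservation is 24K plus two 4K guard pages,
// CommitBase = MapBase + 4K, CommitSize = 24K, and
// AllocPos = roundDownTo(CommitBase + 24K - 20K, 16), so the user data ends
// flush with the top of the committed region and the headers sit immediately
// below AllocPos.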
template <typename Config>
void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
                                     uptr *BlockEndPtr,
                                     FillContentsMode FillContents) {
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
  const uptr PageSize = getPageSizeCached();
  uptr RoundedSize =
      roundUpTo(roundUpTo(Size, Alignment) + LargeBlock::getHeaderSize() +
                    Chunk::getHeaderSize(),
                PageSize);
  if (Alignment > PageSize)
    RoundedSize += Alignment - PageSize;

  if (Alignment < PageSize && Cache.canCache(RoundedSize)) {
    LargeBlock::Header *H;
    bool Zeroed;
    if (Cache.retrieve(Options, Size, Alignment, &H, &Zeroed)) {
      const uptr BlockEnd = H->CommitBase + H->CommitSize;
      if (BlockEndPtr)
        *BlockEndPtr = BlockEnd;
      uptr HInt = reinterpret_cast<uptr>(H);
      if (allocatorSupportsMemoryTagging<Config>())
        HInt = untagPointer(HInt);
      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
      void *Ptr = reinterpret_cast<void *>(PtrInt);
      if (FillContents && !Zeroed)
        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
               BlockEnd - PtrInt);
      const uptr BlockSize = BlockEnd - HInt;
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += BlockSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, BlockSize);
        Stats.add(StatMapped, H->MapSize);
      }
      return Ptr;
    }
  }

  MapPlatformData Data = {};
  const uptr MapSize = RoundedSize + 2 * PageSize;
  uptr MapBase = reinterpret_cast<uptr>(
      map(nullptr, MapSize, nullptr, MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
  if (UNLIKELY(!MapBase))
    return nullptr;
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(Alignment >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.
    // the pointer that is returned by the C or C++ allocation APIs) ends up
    // on a page boundary, and our headers will live in the preceding page.
    CommitBase = roundUpTo(MapBase + PageSize + 1, Alignment) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory-wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd =
        CommitBase + PageSize + roundUpTo(Size, PageSize) + PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr AllocPos = roundDownTo(CommitBase + CommitSize - Size, Alignment);
  mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, &Data);
  const uptr HeaderPos =
      AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(HeaderPos));
  if (useMemoryTagging<Config>(Options))
    storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
              reinterpret_cast<uptr>(H + 1));
  H->MapBase = MapBase;
  H->MapSize = MapEnd - MapBase;
  H->CommitBase = CommitBase;
  H->CommitSize = CommitSize;
  H->Data = Data;
  if (BlockEndPtr)
    *BlockEndPtr = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MapSize);
  }
  return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}

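// Removes the block from the in-use list and updates the stats under the
// lock, then hands the block to the cache, which will either keep it around
// for reuse or unmap it.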
template <typename Config>
void MapAllocator<Config>::deallocate(Options Options, void *Ptr) {
  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
  const uptr CommitSize = H->CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MapSize);
  }
  Cache.store(Options, H);
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) const {
  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
              "(%zuK), remains %u (%zuK) max %zuM\n",
              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_