//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Fixed array to store the LargeMmapAllocator chunks list, limited to 32K
// total allocated chunks. To be used in memory-constrained or otherwise not
// memory-hungry cases (currently, 32-bit targets and the internal allocator).
class LargeMmapAllocatorPtrArrayStatic {
 public:
  inline void *Init() { return &p_[0]; }
  inline void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
 private:
  static const int kMaxNumChunks = 1 << 15;
  uptr p_[kMaxNumChunks];
};

// A much less restricted LargeMmapAllocator chunks list (compared to
// PtrArrayStatic). Backed by an mmaped memory region and able to hold up to
// 1M chunks. ReservedAddressRange is used instead of just MAP_NORESERVE to
// get the same behavior on Fuchsia, which does not support MAP_NORESERVE.
class LargeMmapAllocatorPtrArrayDynamic {
 public:
  inline void *Init() {
    uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
                                 SecondaryAllocatorName);
    CHECK(p);
    return reinterpret_cast<void*>(p);
  }

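  // Makes sure the slot for the n-th chunk pointer is backed by committed
  // memory, mapping another kChunksBlockCount-entry block of the reserved
  // range on demand.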
  inline void EnsureSpace(uptr n) {
    CHECK_LT(n, kMaxNumChunks);
    DCHECK(n <= n_reserved_);
    if (UNLIKELY(n == n_reserved_)) {
      address_range_.MapOrDie(
          reinterpret_cast<uptr>(address_range_.base()) +
              n_reserved_ * sizeof(uptr),
          kChunksBlockCount * sizeof(uptr));
      n_reserved_ += kChunksBlockCount;
    }
  }

 private:
  static const int kMaxNumChunks = 1 << 20;
  static const int kChunksBlockCount = 1 << 14;
  ReservedAddressRange address_range_;
  uptr n_reserved_;
};

#if SANITIZER_WORDSIZE == 32
typedef LargeMmapAllocatorPtrArrayStatic DefaultLargeMmapAllocatorPtrArray;
#else
typedef LargeMmapAllocatorPtrArrayDynamic DefaultLargeMmapAllocatorPtrArray;
#endif

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
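//
// Illustrative usage (a sketch only; AllocatorStats is provided by the
// surrounding sanitizer_allocator.h headers, not by this file):
//   LargeMmapAllocator<> a;
//   a.Init();
//   AllocatorStats stats;
//   stats.Init();
//   void *p = a.Allocate(&stats, /*size=*/1 << 20, /*alignment=*/8);
//   CHECK(a.PointerIsMine(p));
//   a.Deallocate(&stats, p);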
template <class MapUnmapCallback = NoOpMapUnmapCallback,
          class PtrArrayT = DefaultLargeMmapAllocatorPtrArray,
          class AddressSpaceViewTy = LocalAddressSpaceView>
class LargeMmapAllocator {
 public:
  using AddressSpaceView = AddressSpaceViewTy;
  void InitLinkerInitialized() {
    page_size_ = GetPageSizeCached();
    chunks_ = reinterpret_cast<Header**>(ptr_array_.Init());
  }

  void Init() {
    internal_memset(this, 0, sizeof(*this));
    InitLinkerInitialized();
  }

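  // Maps a dedicated region for each request: the size is rounded up to whole
  // pages plus one page that holds the chunk Header, and extra space is
  // reserved when the requested alignment exceeds the page size. Returns
  // nullptr on size overflow or if the mapping fails.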
  void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    // Overflow.
    if (map_size < size) {
      Report("WARNING: %s: LargeMmapAllocator allocation overflow: "
             "0x%zx bytes with 0x%zx alignment requested\n",
             SanitizerToolName, map_size, alignment);
      return nullptr;
    }
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDieOnFatalError(map_size, SecondaryAllocatorName));
    if (!map_beg)
      return nullptr;
    CHECK(IsAligned(map_beg, page_size_));
    MapUnmapCallback().OnMap(map_beg, map_size);
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK(IsAligned(res, alignment));
    CHECK(IsAligned(res, page_size_));
    CHECK_GE(res + size, map_beg);
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    uptr size_log = MostSignificantSetBitIndex(map_size);
    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
    {
      SpinMutexLock l(&mutex_);
      ptr_array_.EnsureSpace(n_chunks_);
      uptr idx = n_chunks_++;
      h->chunk_idx = idx;
      chunks_[idx] = h;
      chunks_sorted_ = false;
      stats.n_allocs++;
      stats.currently_allocated += map_size;
      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
      stats.by_size_log[size_log]++;
      stat->Add(AllocatorStatAllocated, map_size);
      stat->Add(AllocatorStatMapped, map_size);
    }
    return reinterpret_cast<void*>(res);
  }

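  // Removes the chunk from chunks_ by swapping it with the last element,
  // updates the statistics, and then unmaps the entire region, including the
  // header page.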
  void Deallocate(AllocatorStats *stat, void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      uptr idx = h->chunk_idx;
      CHECK_EQ(chunks_[idx], h);
      CHECK_LT(idx, n_chunks_);
      chunks_[idx] = chunks_[--n_chunks_];
      chunks_[idx]->chunk_idx = idx;
      chunks_sorted_ = false;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
      stat->Sub(AllocatorStatAllocated, h->map_size);
      stat->Sub(AllocatorStatMapped, h->map_size);
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (uptr i = 0; i < n_chunks_; i++) {
      Header *h = chunks_[i];
      CHECK_EQ(h->chunk_idx, i);
      res += RoundUpMapSize(h->size);
    }
    return res;
  }

  bool PointerIsMine(const void *p) const {
    return GetBlockBegin(p) != nullptr;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }

  // At least page_size_/2 metadata bytes are available.
  void *GetMetaData(const void *p) {
    // Too slow: CHECK_EQ(p, GetBlockBegin(p));
    if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
      Printf("%s: bad pointer %p\n", SanitizerToolName, p);
      CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
    }
    return GetHeader(p) + 1;
  }

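  // Returns the beginning of the user block that contains ptr, or nullptr if
  // ptr does not belong to this allocator. Performs a linear scan over all
  // chunks, so PointerIsMine() is O(n_chunks_).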
  void *GetBlockBegin(const void *ptr) const {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    uptr nearest_chunk = 0;
    Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    // Cache-friendly linear search.
    for (uptr i = 0; i < n_chunks_; i++) {
      uptr ch = reinterpret_cast<uptr>(chunks[i]);
      if (p < ch) continue;  // p is to the left of this chunk, skip it.
      if (p - ch < p - nearest_chunk)
        nearest_chunk = ch;
    }
    if (!nearest_chunk)
      return nullptr;
    const Header *h =
        AddressSpaceView::Load(reinterpret_cast<Header *>(nearest_chunk));
    Header *h_ptr = reinterpret_cast<Header *>(nearest_chunk);
    CHECK_GE(nearest_chunk, h->map_beg);
    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
    CHECK_LE(nearest_chunk, p);
    if (h->map_beg + h->map_size <= p)
      return nullptr;
    return GetUser(h_ptr);
  }

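  // Sorts chunks_ by address and rewrites each chunk_idx to its new position,
  // so that the binary search in GetBlockBeginFastLocked() stays valid.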
  void EnsureSortedChunks() {
    if (chunks_sorted_) return;
    Header **chunks = AddressSpaceView::LoadWritable(chunks_, n_chunks_);
    Sort(reinterpret_cast<uptr *>(chunks), n_chunks_);
    for (uptr i = 0; i < n_chunks_; i++)
      AddressSpaceView::LoadWritable(chunks[i])->chunk_idx = i;
    chunks_sorted_ = true;
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(const void *ptr) {
    mutex_.CheckLocked();
    uptr p = reinterpret_cast<uptr>(ptr);
    uptr n = n_chunks_;
    if (!n) return nullptr;
    EnsureSortedChunks();
    Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    auto min_mmap_ = reinterpret_cast<uptr>(chunks[0]);
    auto max_mmap_ = reinterpret_cast<uptr>(chunks[n - 1]) +
                     AddressSpaceView::Load(chunks[n - 1])->map_size;
    if (p < min_mmap_ || p >= max_mmap_)
      return nullptr;
    uptr beg = 0, end = n - 1;
    // This loop is a log(n) lower_bound. It does not check for the exact match
    // to avoid expensive cache-thrashing loads.
    while (end - beg >= 2) {
      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
      if (p < reinterpret_cast<uptr>(chunks[mid]))
        end = mid - 1;  // We are not interested in chunks[mid].
      else
        beg = mid;  // chunks[mid] may still be what we want.
    }

    if (beg < end) {
      CHECK_EQ(beg + 1, end);
      // There are 2 chunks left, choose one.
      if (p >= reinterpret_cast<uptr>(chunks[end]))
        beg = end;
    }

    const Header *h = AddressSpaceView::Load(chunks[beg]);
    Header *h_ptr = chunks[beg];
    if (h->map_beg + h->map_size <= p || p < h->map_beg)
      return nullptr;
    return GetUser(h_ptr);
  }

  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times, "
           "remains %zd (%zd K) max %zd M; by size logs: ",
           stats.n_allocs, stats.n_allocs - stats.n_frees,
           stats.currently_allocated >> 10, stats.max_allocated >> 20);
    for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
      uptr c = stats.by_size_log[i];
      if (!c) continue;
      Printf("%zd:%zd; ", i, c);
    }
    Printf("\n");
  }

  // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
  // zone introspection API.
  void ForceLock() SANITIZER_ACQUIRE(mutex_) { mutex_.Lock(); }

  void ForceUnlock() SANITIZER_RELEASE(mutex_) { mutex_.Unlock(); }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    EnsureSortedChunks();  // Avoid doing the sort while iterating.
    const Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    for (uptr i = 0; i < n_chunks_; i++) {
      const Header *t = chunks[i];
      callback(reinterpret_cast<uptr>(GetUser(t)), arg);
      // Consistency check: verify that the array did not change.
      CHECK_EQ(chunks[i], t);
      CHECK_EQ(AddressSpaceView::Load(chunks[i])->chunk_idx, i);
    }
  }

 private:
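  // Each chunk's Header lives at the start of the page immediately preceding
  // the user block; the rest of that page is handed out by GetMetaData().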
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    uptr chunk_idx;
  };

  Header *GetHeader(uptr p) {
    CHECK(IsAligned(p, page_size_));
    return reinterpret_cast<Header*>(p - page_size_);
  }
  Header *GetHeader(const void *p) {
    return GetHeader(reinterpret_cast<uptr>(p));
  }

  void *GetUser(const Header *h) const {
    CHECK(IsAligned((uptr)h, page_size_));
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
  }

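  // Rounds the user size up to whole pages and adds one extra page that will
  // hold the chunk Header in front of the user block.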
  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }

  uptr page_size_;
  Header **chunks_;
  PtrArrayT ptr_array_;
  uptr n_chunks_;
  bool chunks_sorted_;
  struct Stats {
    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
  } stats;
  mutable StaticSpinMutex mutex_;
};