//===-- asan_allocator.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_allocator.cpp.
//===----------------------------------------------------------------------===//

#ifndef ASAN_ALLOCATOR_H
#define ASAN_ALLOCATOR_H

#include "asan_flags.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_platform.h"

namespace __asan {

enum AllocType {
  FROM_MALLOC = 1,  // Memory block came from malloc, calloc, realloc, etc.
  FROM_NEW = 2,     // Memory block came from operator new.
  FROM_NEW_BR = 3   // Memory block came from operator new [ ]
};

class AsanChunk;

struct AllocatorOptions {
  u32 quarantine_size_mb;
  u32 thread_local_quarantine_size_kb;
  u16 min_redzone;
  u16 max_redzone;
  u8 may_return_null;
  u8 alloc_dealloc_mismatch;
  s32 release_to_os_interval_ms;

  void SetFrom(const Flags *f, const CommonFlags *cf);
  void CopyTo(Flags *f, CommonFlags *cf);
};

void InitializeAllocator(const AllocatorOptions &options);
void ReInitializeAllocator(const AllocatorOptions &options);
void GetAllocatorOptions(AllocatorOptions *options);

class AsanChunkView {
 public:
  explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
  bool IsValid() const;        // Checks if AsanChunkView points to a valid
                               // allocated or quarantined chunk.
  bool IsAllocated() const;    // Checks if the memory is currently allocated.
  bool IsQuarantined() const;  // Checks if the memory is currently quarantined.
  uptr Beg() const;            // First byte of user memory.
  uptr End() const;            // Last byte of user memory.
  uptr UsedSize() const;       // Size requested by the user.
  u32 UserRequestedAlignment() const;  // Originally requested alignment.
  uptr AllocTid() const;
  uptr FreeTid() const;
  bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
  u32 GetAllocStackId() const;
  u32 GetFreeStackId() const;
  AllocType GetAllocType() const;
  bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) const {
    if (addr >= Beg() && (addr + access_size) <= End()) {
      *offset = addr - Beg();
      return true;
    }
    return false;
  }
  bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) const {
    (void)access_size;
    if (addr < Beg()) {
      *offset = Beg() - addr;
      return true;
    }
    return false;
  }
  bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) const {
    if (addr + access_size > End()) {
      *offset = addr - End();
      return true;
    }
    return false;
  }

 private:
  AsanChunk *const chunk_;
};

AsanChunkView FindHeapChunkByAddress(uptr address);
AsanChunkView FindHeapChunkByAllocBeg(uptr address);
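
// Example (hypothetical usage, not an interface defined in this header): an
// error-reporting path can combine FindHeapChunkByAddress() with the
// AddrIs*() helpers above to classify a bad address relative to a chunk's
// user region. `addr` and `access_size` below are placeholder inputs.
//
//   AsanChunkView chunk = FindHeapChunkByAddress(addr);
//   sptr offset = 0;
//   if (chunk.IsValid()) {
//     if (chunk.AddrIsInside(addr, access_size, &offset)) {
//       // addr is `offset` bytes past Beg(); the access fits in user memory.
//     } else if (chunk.AddrIsAtLeft(addr, access_size, &offset)) {
//       // Underflow: addr is `offset` bytes to the left of Beg().
//     } else if (chunk.AddrIsAtRight(addr, access_size, &offset)) {
//       // Overflow: the access extends past End().
//     }
//   }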

// List of AsanChunks with total size.
class AsanChunkFifoList : public IntrusiveList<AsanChunk> {
 public:
  explicit AsanChunkFifoList(LinkerInitialized) {}
  AsanChunkFifoList() { clear(); }
  void Push(AsanChunk *n);
  void PushList(AsanChunkFifoList *q);
  AsanChunk *Pop();
  uptr size() { return size_; }
  void clear() {
    IntrusiveList<AsanChunk>::clear();
    size_ = 0;
  }

 private:
  uptr size_;
};

struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const;
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const;
  void OnUnmap(uptr p, uptr size) const;
};

#if SANITIZER_CAN_USE_ALLOCATOR64
#  if SANITIZER_FUCHSIA
// This is a sentinel indicating we do not want the primary allocator arena to
// be placed at a fixed address. It will be anonymously mmap'd.
const uptr kAllocatorSpace = ~(uptr)0;
#    if SANITIZER_RISCV64

// These are sanitizer tunings that allow all bringup tests for RISCV-64 Sv39
// + Fuchsia to run with asan instrumentation. That is, we can run bringup,
// e2e, libc, and scudo tests with this configuration.
//
// TODO: This is specifically tuned for Sv39. 48/57 will likely require other
// tunings, or possibly use the same tunings Fuchsia uses for other archs. The
// VMA size isn't technically tied to the Fuchsia System ABI, so once 48/57 is
// supported, we'd need a way of dynamically checking what the VMA size is and
// determining the optimal configuration.

// This indicates the total amount of space dedicated to the primary allocator
// during initialization. This is roughly proportional to the size set by the
// FuchsiaConfig for scudo (~11.25GB == ~2^33.49). Requesting any more could
// lead to some failures in sanitized bringup tests where we can't allocate
// new vmars because there wouldn't be enough contiguous space. We could try
// 2^34 if we re-evaluate the SizeClassMap settings.
const uptr kAllocatorSize = UINT64_C(1) << 33;  // 8GB

// This is roughly equivalent to the configuration for the
// VeryDenseSizeClassMap but has fewer size classes (ideally at most 32).
// Fewer size classes mean the region size for each class is larger, thus less
// chance of running out of space for each region. The main differences are
// the MidSizeLog (which is smaller) and the MaxSizeLog (which is larger).
//
// - The MaxSizeLog is higher to allow some of the largest allocations I've
//   observed to be placed in the primary allocator's arena as opposed to
//   being mmap'd by the secondary allocator. This helps reduce fragmentation
//   from large classes. A huge example of this is the scudo allocator tests
//   (and their testing infrastructure), which malloc/new objects on the order
//   of hundreds of kilobytes that normally would not be in the primary
//   allocator arena with the default VeryDenseSizeClassMap.
// - The MidSizeLog is reduced to help shrink the number of size classes and
//   increase region size. Without this, we'd see ASan complain many times
//   about a region running out of available space.
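//
// Rough arithmetic behind the "at most 32 size classes" goal (assuming the
// usual SizeClassAllocator64 layout, which splits the arena into one region
// per rounded size class): with kAllocatorSize == 2^33 and 32 classes, each
// region gets about 2^33 / 32 == 2^28 bytes (256 MiB). Every additional size
// class shrinks all regions proportionally, which is why trimming the class
// count reduces the chance of any one region running out of space.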
//
// This differs a bit from the fuchsia config in scudo, mainly from the
// NumBits, MaxSizeLog, and NumCachedHintT. This should place the number of
// size classes for scudo at 45, and some large objects allocated by this
// config would be placed in the arena whereas scudo would mmap them. The asan
// allocator needs to have a number of classes that is a power of 2 for
// various internal things to work, so we can't match the scudo settings to a
// tee. The sanitizer allocator is slightly slower than scudo's, but this is
// enough to get memory-intensive scudo tests to run with asan
// instrumentation.
typedef SizeClassMap</*kNumBits=*/2,
                     /*kMinSizeLog=*/5,
                     /*kMidSizeLog=*/8,
                     /*kMaxSizeLog=*/18,
                     /*kNumCachedHintT=*/8,
                     /*kMaxBytesCachedLog=*/10>
    SizeClassMap;
static_assert(SizeClassMap::kNumClassesRounded <= 32,
              "The above tunings were specifically selected to ensure there "
              "would be at most 32 size classes. This restriction could be "
              "loosened to 64 size classes if we can find a configuration of "
              "allocator size and SizeClassMap tunings that allows us to "
              "reliably run all bringup tests in a sanitized environment.");

#    else   // SANITIZER_RISCV64
// These are the default allocator tunings for non-RISCV environments where
// the VMA is usually 48 bits and we have lots of space.
const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
typedef DefaultSizeClassMap SizeClassMap;
#    endif  // SANITIZER_RISCV64
#  else     // SANITIZER_FUCHSIA

#    if SANITIZER_APPLE
const uptr kAllocatorSpace = 0x600000000000ULL;
#    else   // SANITIZER_APPLE
const uptr kAllocatorSpace = ~(uptr)0;
#    endif  // SANITIZER_APPLE

#    if defined(__powerpc64__)
const uptr kAllocatorSize = 0x20000000000ULL;  // 2T.
typedef DefaultSizeClassMap SizeClassMap;
#    elif defined(__aarch64__) && SANITIZER_ANDROID
// Android needs to support 39, 42 and 48 bit VMA.
const uptr kAllocatorSize = 0x2000000000ULL;  // 128G.
typedef VeryCompactSizeClassMap SizeClassMap;
#    elif SANITIZER_RISCV64
const uptr kAllocatorSize = 0x2000000000ULL;  // 128G.
typedef VeryDenseSizeClassMap SizeClassMap;
#    elif defined(__sparc__)
const uptr kAllocatorSize = 0x20000000000ULL;  // 2T.
typedef DefaultSizeClassMap SizeClassMap;
#    elif SANITIZER_WINDOWS
const uptr kAllocatorSize = 0x8000000000ULL;  // 500G
typedef DefaultSizeClassMap SizeClassMap;
#    elif SANITIZER_APPLE
const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
typedef DefaultSizeClassMap SizeClassMap;
#    else
const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
typedef DefaultSizeClassMap SizeClassMap;
#    endif  // defined(__powerpc64__) etc.
#  endif    // SANITIZER_FUCHSIA

template <typename AddressSpaceViewTy>
struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 0;
  typedef __asan::SizeClassMap SizeClassMap;
  typedef AsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceView>
using PrimaryAllocatorASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
#else  // SANITIZER_CAN_USE_ALLOCATOR64. Fallback to SizeClassAllocator32.
typedef CompactSizeClassMap SizeClassMap;
template <typename AddressSpaceViewTy>
struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;
  typedef __asan::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = AddressSpaceViewTy;
  typedef AsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
template <typename AddressSpaceView>
using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView> >;
using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
#endif  // SANITIZER_CAN_USE_ALLOCATOR64

static const uptr kNumberOfSizeClasses = SizeClassMap::kNumClasses;

template <typename AddressSpaceView>
using AsanAllocatorASVT =
    CombinedAllocator<PrimaryAllocatorASVT<AddressSpaceView>>;
using AsanAllocator = AsanAllocatorASVT<LocalAddressSpaceView>;
using AllocatorCache = AsanAllocator::AllocatorCache;

struct AsanThreadLocalMallocStorage {
  uptr quarantine_cache[16];
  AllocatorCache allocator_cache;
  void CommitBack();

 private:
  // These objects are allocated via mmap() and are zero-initialized.
  AsanThreadLocalMallocStorage() {}
};

void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type);
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type);
void asan_delete(void *ptr, uptr size, uptr alignment,
                 BufferedStackTrace *stack, AllocType alloc_type);

void *asan_malloc(uptr size, BufferedStackTrace *stack);
void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack);
void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack);
void *asan_reallocarray(void *p, uptr nmemb, uptr size,
                        BufferedStackTrace *stack);
void *asan_valloc(uptr size, BufferedStackTrace *stack);
void *asan_pvalloc(uptr size, BufferedStackTrace *stack);

void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack);
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack);
uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp);

uptr asan_mz_size(const void *ptr);
void asan_mz_force_lock();
void asan_mz_force_unlock();

void PrintInternalAllocatorStats();
void AsanSoftRssLimitExceededCallback(bool exceeded);

}  // namespace __asan
#endif  // ASAN_ALLOCATOR_H