//===-- guarded_pool_allocator.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "gwp_asan/guarded_pool_allocator.h"

#include "gwp_asan/optional/segv_handler.h"
#include "gwp_asan/options.h"
#include "gwp_asan/utilities.h"

// RHEL creates the PRIu64 format macro (for printing uint64_t's) only when this
// macro is defined before including <inttypes.h>.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif

#include <assert.h>
#include <inttypes.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

using AllocationMetadata = gwp_asan::AllocationMetadata;
using Error = gwp_asan::Error;

namespace gwp_asan {
namespace {
// Forward declare the pointer to the singleton version of this class.
// Instantiated during initialisation, this allows the signal handler
// to find this class in order to deduce the root cause of failures. Must not be
// referenced by users outside this translation unit, in order to avoid
// init-order-fiasco.
GuardedPoolAllocator *SingletonPtr = nullptr;
} // anonymous namespace

// Gets the singleton implementation of this class. Thread-compatible until
// init() is called, thread-safe afterwards.
GuardedPoolAllocator *GuardedPoolAllocator::getSingleton() {
  return SingletonPtr;
}

// One-time setup of the guarded pool, the per-slot metadata array, and the
// free-slot list, driven by the user-supplied options. Returns early (leaving
// the allocator inert) if GWP-ASan is disabled by the options. Not
// thread-safe: must complete before the allocator is used concurrently.
void GuardedPoolAllocator::init(const options::Options &Opts) {
  // Note: We return from the constructor here if GWP-ASan is not available.
  // This will stop heap-allocation of class members, as well as mmap() of the
  // guarded slots.
  if (!Opts.Enabled || Opts.SampleRate == 0 ||
      Opts.MaxSimultaneousAllocations == 0)
    return;

  Check(Opts.SampleRate >= 0, "GWP-ASan Error: SampleRate is < 0.");
  Check(Opts.SampleRate < (1 << 30), "GWP-ASan Error: SampleRate is >= 2^30.");
  Check(Opts.MaxSimultaneousAllocations >= 0,
        "GWP-ASan Error: MaxSimultaneousAllocations is < 0.");

  SingletonPtr = this;
  Backtrace = Opts.Backtrace;

  State.MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations;

  State.PageSize = getPlatformPageSize();

  PerfectlyRightAlign = Opts.PerfectlyRightAlign;

  // Pool layout: a leading guard page, then for each slot one
  // maximumAllocationSize() region followed by its own guard page
  // (hence PageSize * (1 + N) guard bytes plus N slot regions).
  size_t PoolBytesRequired =
      State.PageSize * (1 + State.MaxSimultaneousAllocations) +
      State.MaxSimultaneousAllocations * State.maximumAllocationSize();
  void *GuardedPoolMemory = mapMemory(PoolBytesRequired, kGwpAsanGuardPageName);

  // One metadata record per slot. Unlike the pool (whose slots are made
  // accessible lazily in allocate()), the metadata array is marked
  // read/write immediately.
  size_t BytesRequired = State.MaxSimultaneousAllocations * sizeof(*Metadata);
  Metadata = reinterpret_cast<AllocationMetadata *>(
      mapMemory(BytesRequired, kGwpAsanMetadataName));
  markReadWrite(Metadata, BytesRequired, kGwpAsanMetadataName);

  // Allocate memory and set up the free pages queue.
  BytesRequired = State.MaxSimultaneousAllocations * sizeof(*FreeSlots);
  FreeSlots = reinterpret_cast<size_t *>(
      mapMemory(BytesRequired, kGwpAsanFreeSlotsName));
  markReadWrite(FreeSlots, BytesRequired, kGwpAsanFreeSlotsName);

  // Multiply the sample rate by 2 to give a good, fast approximation for (1 /
  // SampleRate) chance of sampling.
  if (Opts.SampleRate != 1)
    AdjustedSampleRatePlusOne = static_cast<uint32_t>(Opts.SampleRate) * 2 + 1;
  else
    AdjustedSampleRatePlusOne = 2;

  initPRNG();
  // Randomise the first sample point into [1, AdjustedSampleRatePlusOne - 1]
  // so that threads/processes don't all sample their N-th allocation in
  // lockstep.
  ThreadLocals.NextSampleCounter =
      ((getRandomUnsigned32() % (AdjustedSampleRatePlusOne - 1)) + 1) &
      ThreadLocalPackedVariables::NextSampleCounterMask;

  State.GuardedPagePool = reinterpret_cast<uintptr_t>(GuardedPoolMemory);
  State.GuardedPagePoolEnd =
      reinterpret_cast<uintptr_t>(GuardedPoolMemory) + PoolBytesRequired;

  if (Opts.InstallForkHandlers)
    installAtFork();
}

// Blocks all pool mutations by taking PoolMutex; the lock is held until a
// matching enable() call (e.g. across fork(), via installAtFork()).
void GuardedPoolAllocator::disable() { PoolMutex.lock(); }

// Releases the lock taken by disable().
void GuardedPoolAllocator::enable() { PoolMutex.unlock(); }

// Invokes Cb(Addr, Size, Arg) for every live (allocated and not yet
// deallocated) allocation whose start address lies in [Base, Base + Size).
// NOTE(review): no lock is taken here; presumably callers bracket this with
// disable()/enable() for a consistent snapshot — confirm against callers.
void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
                                   void *Arg) {
  uintptr_t Start = reinterpret_cast<uintptr_t>(Base);
  for (size_t i = 0; i < State.MaxSimultaneousAllocations; ++i) {
    const AllocationMetadata &Meta = Metadata[i];
    if (Meta.Addr && !Meta.IsDeallocated && Meta.Addr >= Start &&
        Meta.Addr < Start + Size)
      Cb(Meta.Addr, Meta.Size, Arg);
  }
}

// Test-only teardown: unmaps the guarded pool, metadata and free-slot arrays
// and zeroes the corresponding members so a subsequent init() starts clean.
void GuardedPoolAllocator::uninitTestOnly() {
  if (State.GuardedPagePool) {
    unmapMemory(reinterpret_cast<void *>(State.GuardedPagePool),
                State.GuardedPagePoolEnd - State.GuardedPagePool,
                kGwpAsanGuardPageName);
    State.GuardedPagePool = 0;
    State.GuardedPagePoolEnd = 0;
  }
  if (Metadata) {
    unmapMemory(Metadata, State.MaxSimultaneousAllocations * sizeof(*Metadata),
                kGwpAsanMetadataName);
    Metadata = nullptr;
  }
  if (FreeSlots) {
    unmapMemory(FreeSlots,
                State.MaxSimultaneousAllocations * sizeof(*FreeSlots),
                kGwpAsanFreeSlotsName);
    FreeSlots = nullptr;
  }
}

// Rounds Ptr down to the start of its containing page. Relies on PageSize
// being a power of two.
static uintptr_t getPageAddr(uintptr_t Ptr, uintptr_t PageSize) {
  return Ptr & ~(PageSize - 1);
}

// Sampled allocation slow path. Returns a guarded allocation of Size bytes,
// or nullptr when GWP-ASan is disabled, the call is recursive, Size is zero
// or exceeds the maximum slot size, or the pool is full — in which case the
// caller falls back to the supporting allocator.
void *GuardedPoolAllocator::allocate(size_t Size) {
  // GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled, fall
  // back to the supporting allocator.
  if (State.GuardedPagePoolEnd == 0) {
    // Push the thread's next sample point out to the maximum so the fast path
    // doesn't keep funnelling into this slow path.
    ThreadLocals.NextSampleCounter =
        (AdjustedSampleRatePlusOne - 1) &
        ThreadLocalPackedVariables::NextSampleCounterMask;
    return nullptr;
  }

  // Protect against recursivity.
  if (ThreadLocals.RecursiveGuard)
    return nullptr;
  ScopedRecursiveGuard SRG;

  if (Size == 0 || Size > State.maximumAllocationSize())
    return nullptr;

  // Only the slot reservation needs the lock; the rest operates on the
  // reserved slot, which no other thread can touch.
  size_t Index;
  {
    ScopedLock L(PoolMutex);
    Index = reserveSlot();
  }

  if (Index == kInvalidSlotID)
    return nullptr;

  uintptr_t Ptr = State.slotToAddr(Index);
  // Should we right-align this allocation? Coin-flip between left-aligned
  // placement (catches buffer underflows against the preceding guard page)
  // and right-aligned placement (catches overflows against the next one).
  if (getRandomUnsigned32() % 2 == 0) {
    AlignmentStrategy Align = AlignmentStrategy::DEFAULT;
    if (PerfectlyRightAlign)
      Align = AlignmentStrategy::PERFECT;
    Ptr +=
        State.maximumAllocationSize() - rightAlignedAllocationSize(Size, Align);
  }
  AllocationMetadata *Meta = addrToMetadata(Ptr);

  // If a slot is multiple pages in size, and the allocation takes up a single
  // page, we can improve overflow detection by leaving the unused pages as
  // unmapped.
  markReadWrite(reinterpret_cast<void *>(getPageAddr(Ptr, State.PageSize)),
                Size, kGwpAsanAliveSlotName);

  Meta->RecordAllocation(Ptr, Size);
  Meta->AllocationTrace.RecordBacktrace(Backtrace);

  return reinterpret_cast<void *>(Ptr);
}

// Records the failure kind and faulting address in the shared state, then
// deliberately writes to the first guard page to raise SIGSEGV so the
// installed signal handler can report the error. Expected not to return
// (__builtin_unreachable). Callers take PoolMutex first so that when several
// threads fault at once, the first error is the one recorded.
void GuardedPoolAllocator::trapOnAddress(uintptr_t Address, Error E) {
  State.FailureType = E;
  State.FailureAddress = Address;

  // Raise a SEGV by touching first guard page.
  volatile char *p = reinterpret_cast<char *>(State.GuardedPagePool);
  *p = 0;
  __builtin_unreachable();
}

// Crash-time shutdown: set this thread's recursive guard so no further
// guarded allocations happen, and best-effort acquire the pool mutex —
// tryLock, not lock, because the faulting code may already hold it.
void GuardedPoolAllocator::stop() {
  ThreadLocals.RecursiveGuard = true;
  PoolMutex.tryLock();
}

// Returns a guarded allocation to the pool, unmapping its slot so later
// touches trap as use-after-free. Traps (via trapOnAddress, which does not
// return) on invalid-free and double-free.
void GuardedPoolAllocator::deallocate(void *Ptr) {
  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
  uintptr_t UPtr = reinterpret_cast<uintptr_t>(Ptr);
  size_t Slot = State.getNearestSlot(UPtr);
  uintptr_t SlotStart = State.slotToAddr(Slot);
  AllocationMetadata *Meta = addrToMetadata(UPtr);
  // A pointer inside the pool that doesn't match the recorded allocation
  // start is an invalid (e.g. interior or misaligned) free.
  if (Meta->Addr != UPtr) {
    // If multiple errors occur at the same time, use the first one.
    ScopedLock L(PoolMutex);
    trapOnAddress(UPtr, Error::INVALID_FREE);
  }

  // Intentionally scope the mutex here, so that other threads can access the
  // pool during the expensive markInaccessible() call.
  {
    ScopedLock L(PoolMutex);
    if (Meta->IsDeallocated) {
      trapOnAddress(UPtr, Error::DOUBLE_FREE);
    }

    // Ensure that the deallocation is recorded before marking the page as
    // inaccessible. Otherwise, a racy use-after-free will have inconsistent
    // metadata.
    Meta->RecordDeallocation();

    // Ensure that the unwinder is not called if the recursive flag is set,
    // otherwise non-reentrant unwinders may deadlock.
    if (!ThreadLocals.RecursiveGuard) {
      ScopedRecursiveGuard SRG;
      Meta->DeallocationTrace.RecordBacktrace(Backtrace);
    }
  }

  // Re-protect the entire slot (done outside the lock — see comment above).
  markInaccessible(reinterpret_cast<void *>(SlotStart),
                   State.maximumAllocationSize(), kGwpAsanGuardPageName);

  // And finally, lock again to release the slot back into the pool.
  ScopedLock L(PoolMutex);
  freeSlot(Slot);
}

// Returns the user-requested size recorded for the allocation at Ptr. Ptr
// must be the exact address returned by allocate() (asserted below).
size_t GuardedPoolAllocator::getSize(const void *Ptr) {
  assert(pointerIsMine(Ptr));
  ScopedLock L(PoolMutex);
  AllocationMetadata *Meta = addrToMetadata(reinterpret_cast<uintptr_t>(Ptr));
  assert(Meta->Addr == reinterpret_cast<uintptr_t>(Ptr));
  return Meta->Size;
}

// Maps any address within a slot to that slot's metadata record.
AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const {
  return &Metadata[State.getNearestSlot(Ptr)];
}

// Picks the slot for a new allocation: never-used slots are handed out in
// order first; afterwards a random entry of the free list is chosen. Returns
// kInvalidSlotID when the pool is exhausted. Caller must hold PoolMutex.
size_t GuardedPoolAllocator::reserveSlot() {
  // Avoid potential reuse of a slot before we have made at least a single
  // allocation in each slot. Helps with our use-after-free detection.
  if (NumSampledAllocations < State.MaxSimultaneousAllocations)
    return NumSampledAllocations++;

  if (FreeSlotsLength == 0)
    return kInvalidSlotID;

  // O(1) removal: overwrite the chosen entry with the last one and shrink.
  size_t ReservedIndex = getRandomUnsigned32() % FreeSlotsLength;
  size_t SlotIndex = FreeSlots[ReservedIndex];
  FreeSlots[ReservedIndex] = FreeSlots[--FreeSlotsLength];
  return SlotIndex;
}

// Appends SlotIndex to the free list. Caller must hold PoolMutex.
void GuardedPoolAllocator::freeSlot(size_t SlotIndex) {
  assert(FreeSlotsLength < State.MaxSimultaneousAllocations);
  FreeSlots[FreeSlotsLength++] = SlotIndex;
}

// One step of a thread-local xorshift32 PRNG (shift constants 13/17/5).
// Fast and non-cryptographic, which is sufficient for sampling decisions;
// seeded via initPRNG().
uint32_t GuardedPoolAllocator::getRandomUnsigned32() {
  uint32_t RandomState = ThreadLocals.RandomState;
  RandomState ^= RandomState << 13;
  RandomState ^= RandomState >> 17;
  RandomState ^= RandomState << 5;
  ThreadLocals.RandomState = RandomState;
  return RandomState;
}

// Definition of the per-thread packed state (sample counter, PRNG state,
// recursion guard) declared in the header.
GWP_ASAN_TLS_INITIAL_EXEC
GuardedPoolAllocator::ThreadLocalPackedVariables
    GuardedPoolAllocator::ThreadLocals;
} // namespace gwp_asan