//===-- guarded_pool_allocator.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "gwp_asan/guarded_pool_allocator.h"

#include "gwp_asan/options.h"
#include "gwp_asan/utilities.h"
#include "optional/segv_handler.h"

// RHEL creates the PRIu64 format macro (for printing uint64_t's) only when
// this macro is defined before including <inttypes.h>.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif

#include <assert.h>
#include <inttypes.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

using AllocationMetadata = gwp_asan::AllocationMetadata;
using Error = gwp_asan::Error;

namespace gwp_asan {
namespace {
// Forward declare the pointer to the singleton version of this class.
// Instantiated during initialisation, this allows the signal handler
// to find this class in order to deduce the root cause of failures. Must not be
// referenced by users outside this translation unit, in order to avoid
// init-order-fiasco.
GuardedPoolAllocator *SingletonPtr = nullptr;

class ScopedBoolean {
public:
  ScopedBoolean(bool &B) : Bool(B) { Bool = true; }
  ~ScopedBoolean() { Bool = false; }

private:
  bool &Bool;
};
} // anonymous namespace

// Gets the singleton implementation of this class. Thread-compatible until
// init() is called, thread-safe afterwards.
GuardedPoolAllocator *GuardedPoolAllocator::getSingleton() {
  return SingletonPtr;
}

void GuardedPoolAllocator::init(const options::Options &Opts) {
  // Note: We return from the constructor here if GWP-ASan is not available.
  // This will stop heap-allocation of class members, as well as mmap() of the
  // guarded slots.
  if (!Opts.Enabled || Opts.SampleRate == 0 ||
      Opts.MaxSimultaneousAllocations == 0)
    return;

  Check(Opts.SampleRate >= 0, "GWP-ASan Error: SampleRate is < 0.");
  Check(Opts.SampleRate <= INT32_MAX, "GWP-ASan Error: SampleRate is > 2^31.");
  Check(Opts.MaxSimultaneousAllocations >= 0,
        "GWP-ASan Error: MaxSimultaneousAllocations is < 0.");

  SingletonPtr = this;
  Backtrace = Opts.Backtrace;

  State.MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations;

  State.PageSize = getPlatformPageSize();

  PerfectlyRightAlign = Opts.PerfectlyRightAlign;

  size_t PoolBytesRequired =
      State.PageSize * (1 + State.MaxSimultaneousAllocations) +
      State.MaxSimultaneousAllocations * State.maximumAllocationSize();
  void *GuardedPoolMemory = mapMemory(PoolBytesRequired, kGwpAsanGuardPageName);

  size_t BytesRequired = State.MaxSimultaneousAllocations * sizeof(*Metadata);
  Metadata = reinterpret_cast<AllocationMetadata *>(
      mapMemory(BytesRequired, kGwpAsanMetadataName));
  markReadWrite(Metadata, BytesRequired, kGwpAsanMetadataName);

  // Allocate memory and set up the free pages queue.
  BytesRequired = State.MaxSimultaneousAllocations * sizeof(*FreeSlots);
  FreeSlots = reinterpret_cast<size_t *>(
      mapMemory(BytesRequired, kGwpAsanFreeSlotsName));
  markReadWrite(FreeSlots, BytesRequired, kGwpAsanFreeSlotsName);

  // Multiply the sample rate by 2 to give a good, fast approximation for (1 /
  // SampleRate) chance of sampling.
  if (Opts.SampleRate != 1)
    AdjustedSampleRatePlusOne = static_cast<uint32_t>(Opts.SampleRate) * 2 + 1;
  else
    AdjustedSampleRatePlusOne = 2;

  initPRNG();
  ThreadLocals.NextSampleCounter =
      (getRandomUnsigned32() % (AdjustedSampleRatePlusOne - 1)) + 1;

  State.GuardedPagePool = reinterpret_cast<uintptr_t>(GuardedPoolMemory);
  State.GuardedPagePoolEnd =
      reinterpret_cast<uintptr_t>(GuardedPoolMemory) + PoolBytesRequired;

  if (Opts.InstallForkHandlers)
    installAtFork();
}

void GuardedPoolAllocator::disable() { PoolMutex.lock(); }

void GuardedPoolAllocator::enable() { PoolMutex.unlock(); }

void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
                                   void *Arg) {
  uintptr_t Start = reinterpret_cast<uintptr_t>(Base);
  for (size_t i = 0; i < State.MaxSimultaneousAllocations; ++i) {
    const AllocationMetadata &Meta = Metadata[i];
    if (Meta.Addr && !Meta.IsDeallocated && Meta.Addr >= Start &&
        Meta.Addr < Start + Size)
      Cb(Meta.Addr, Meta.Size, Arg);
  }
}

void GuardedPoolAllocator::uninitTestOnly() {
  if (State.GuardedPagePool) {
    unmapMemory(reinterpret_cast<void *>(State.GuardedPagePool),
                State.GuardedPagePoolEnd - State.GuardedPagePool,
                kGwpAsanGuardPageName);
    State.GuardedPagePool = 0;
    State.GuardedPagePoolEnd = 0;
  }
  if (Metadata) {
    unmapMemory(Metadata, State.MaxSimultaneousAllocations * sizeof(*Metadata),
                kGwpAsanMetadataName);
    Metadata = nullptr;
  }
  if (FreeSlots) {
    unmapMemory(FreeSlots,
                State.MaxSimultaneousAllocations * sizeof(*FreeSlots),
                kGwpAsanFreeSlotsName);
    FreeSlots = nullptr;
  }
}

static uintptr_t getPageAddr(uintptr_t Ptr, uintptr_t PageSize) {
  return Ptr & ~(PageSize - 1);
}

void *GuardedPoolAllocator::allocate(size_t Size) {
  // GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled, fall
  // back to the supporting allocator.
  if (State.GuardedPagePoolEnd == 0)
    return nullptr;

  // Protect against recursivity.
  if (ThreadLocals.RecursiveGuard)
    return nullptr;
  ScopedBoolean SB(ThreadLocals.RecursiveGuard);

  if (Size == 0 || Size > State.maximumAllocationSize())
    return nullptr;

  size_t Index;
  {
    ScopedLock L(PoolMutex);
    Index = reserveSlot();
  }

  if (Index == kInvalidSlotID)
    return nullptr;

  uintptr_t Ptr = State.slotToAddr(Index);
  Ptr += allocationSlotOffset(Size);
  AllocationMetadata *Meta = addrToMetadata(Ptr);

  // If a slot is multiple pages in size, and the allocation takes up a single
  // page, we can improve overflow detection by leaving the unused pages as
  // unmapped.
  markReadWrite(reinterpret_cast<void *>(getPageAddr(Ptr, State.PageSize)),
                Size, kGwpAsanAliveSlotName);

  Meta->RecordAllocation(Ptr, Size);
  Meta->AllocationTrace.RecordBacktrace(Backtrace);

  return reinterpret_cast<void *>(Ptr);
}

void GuardedPoolAllocator::trapOnAddress(uintptr_t Address, Error E) {
  State.FailureType = E;
  State.FailureAddress = Address;

  // Raise a SEGV by touching first guard page.
  volatile char *p = reinterpret_cast<char *>(State.GuardedPagePool);
  *p = 0;
  __builtin_unreachable();
}

void GuardedPoolAllocator::stop() {
  ThreadLocals.RecursiveGuard = true;
  PoolMutex.tryLock();
}

void GuardedPoolAllocator::deallocate(void *Ptr) {
  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
  uintptr_t UPtr = reinterpret_cast<uintptr_t>(Ptr);
  size_t Slot = State.getNearestSlot(UPtr);
  uintptr_t SlotStart = State.slotToAddr(Slot);
  AllocationMetadata *Meta = addrToMetadata(UPtr);
  if (Meta->Addr != UPtr) {
    // If multiple errors occur at the same time, use the first one.
    ScopedLock L(PoolMutex);
    trapOnAddress(UPtr, Error::INVALID_FREE);
  }

  // Intentionally scope the mutex here, so that other threads can access the
  // pool during the expensive markInaccessible() call.
  {
    ScopedLock L(PoolMutex);
    if (Meta->IsDeallocated) {
      trapOnAddress(UPtr, Error::DOUBLE_FREE);
    }

    // Ensure that the deallocation is recorded before marking the page as
    // inaccessible. Otherwise, a racy use-after-free will have inconsistent
    // metadata.
    Meta->RecordDeallocation();

    // Ensure that the unwinder is not called if the recursive flag is set,
    // otherwise non-reentrant unwinders may deadlock.
    if (!ThreadLocals.RecursiveGuard) {
      ScopedBoolean B(ThreadLocals.RecursiveGuard);
      Meta->DeallocationTrace.RecordBacktrace(Backtrace);
    }
  }

  markInaccessible(reinterpret_cast<void *>(SlotStart),
                   State.maximumAllocationSize(), kGwpAsanGuardPageName);

  // And finally, lock again to release the slot back into the pool.
  ScopedLock L(PoolMutex);
  freeSlot(Slot);
}

size_t GuardedPoolAllocator::getSize(const void *Ptr) {
  assert(pointerIsMine(Ptr));
  ScopedLock L(PoolMutex);
  AllocationMetadata *Meta = addrToMetadata(reinterpret_cast<uintptr_t>(Ptr));
  assert(Meta->Addr == reinterpret_cast<uintptr_t>(Ptr));
  return Meta->Size;
}

AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const {
  return &Metadata[State.getNearestSlot(Ptr)];
}

size_t GuardedPoolAllocator::reserveSlot() {
  // Avoid potential reuse of a slot before we have made at least a single
  // allocation in each slot. Helps with our use-after-free detection.
  if (NumSampledAllocations < State.MaxSimultaneousAllocations)
    return NumSampledAllocations++;

  if (FreeSlotsLength == 0)
    return kInvalidSlotID;

  size_t ReservedIndex = getRandomUnsigned32() % FreeSlotsLength;
  size_t SlotIndex = FreeSlots[ReservedIndex];
  FreeSlots[ReservedIndex] = FreeSlots[--FreeSlotsLength];
  return SlotIndex;
}

void GuardedPoolAllocator::freeSlot(size_t SlotIndex) {
  assert(FreeSlotsLength < State.MaxSimultaneousAllocations);
  FreeSlots[FreeSlotsLength++] = SlotIndex;
}

uintptr_t GuardedPoolAllocator::allocationSlotOffset(size_t Size) const {
  assert(Size > 0);

  bool ShouldRightAlign = getRandomUnsigned32() % 2 == 0;
  if (!ShouldRightAlign)
    return 0;

  uintptr_t Offset = State.maximumAllocationSize();
  if (!PerfectlyRightAlign) {
    if (Size == 3)
      Size = 4;
    else if (Size > 4 && Size <= 8)
      Size = 8;
    else if (Size > 8 && (Size % 16) != 0)
      Size += 16 - (Size % 16);
  }
  Offset -= Size;
  return Offset;
}

GWP_ASAN_TLS_INITIAL_EXEC
GuardedPoolAllocator::ThreadLocalPackedVariables
    GuardedPoolAllocator::ThreadLocals;
} // namespace gwp_asan