//===-- guarded_pool_allocator.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "gwp_asan/guarded_pool_allocator.h"

#include "gwp_asan/options.h"
#include "gwp_asan/utilities.h"
#include "optional/segv_handler.h"

// RHEL creates the PRIu64 format macro (for printing uint64_t's) only when this
// macro is defined before including <inttypes.h>.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif

#include <assert.h>
#include <inttypes.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

using AllocationMetadata = gwp_asan::AllocationMetadata;
using Error = gwp_asan::Error;

namespace gwp_asan {
namespace {
// Pointer to the singleton version of this class. Set during initialisation,
// this allows the signal handler to find the allocator in order to deduce the
// root cause of failures. Must not be referenced by users outside this
// translation unit, in order to avoid init-order-fiasco.
GuardedPoolAllocator *SingletonPtr = nullptr;

class ScopedBoolean {
public:
  ScopedBoolean(bool &B) : Bool(B) { Bool = true; }
  ~ScopedBoolean() { Bool = false; }

private:
  bool &Bool;
};
} // anonymous namespace

// Gets the singleton implementation of this class. Thread-compatible until
// init() is called, thread-safe afterwards.
GuardedPoolAllocator *GuardedPoolAllocator::getSingleton() {
  return SingletonPtr;
}

void GuardedPoolAllocator::init(const options::Options &Opts) {
  // Note: We return from init() here if GWP-ASan is not available. This will
  // stop heap-allocation of class members, as well as mmap() of the guarded
  // slots.
  if (!Opts.Enabled || Opts.SampleRate == 0 ||
      Opts.MaxSimultaneousAllocations == 0)
    return;

  Check(Opts.SampleRate >= 0, "GWP-ASan Error: SampleRate is < 0.");
  Check(Opts.SampleRate <= INT32_MAX, "GWP-ASan Error: SampleRate is > 2^31.");
  Check(Opts.MaxSimultaneousAllocations >= 0,
        "GWP-ASan Error: MaxSimultaneousAllocations is < 0.");

  SingletonPtr = this;
  Backtrace = Opts.Backtrace;

  State.MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations;

  State.PageSize = getPlatformPageSize();

  PerfectlyRightAlign = Opts.PerfectlyRightAlign;

  size_t PoolBytesRequired =
      State.PageSize * (1 + State.MaxSimultaneousAllocations) +
      State.MaxSimultaneousAllocations * State.maximumAllocationSize();
  void *GuardedPoolMemory = mapMemory(PoolBytesRequired, kGwpAsanGuardPageName);

  size_t BytesRequired = State.MaxSimultaneousAllocations * sizeof(*Metadata);
  Metadata = reinterpret_cast<AllocationMetadata *>(
      mapMemory(BytesRequired, kGwpAsanMetadataName));
  markReadWrite(Metadata, BytesRequired, kGwpAsanMetadataName);

  // Allocate memory and set up the free pages queue.
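  // FreeSlots holds the indices of guarded slots that have been returned to
  // the pool. freeSlot() appends an index, and reserveSlot() removes a
  // randomly chosen entry by swapping in the last element, so both operations
  // are O(1) and the order of slot reuse is randomised.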
  BytesRequired = State.MaxSimultaneousAllocations * sizeof(*FreeSlots);
  FreeSlots = reinterpret_cast<size_t *>(
      mapMemory(BytesRequired, kGwpAsanFreeSlotsName));
  markReadWrite(FreeSlots, BytesRequired, kGwpAsanFreeSlotsName);

  // Multiply the sample rate by 2 to give a good, fast approximation for (1 /
  // SampleRate) chance of sampling.
  if (Opts.SampleRate != 1)
    AdjustedSampleRatePlusOne = static_cast<uint32_t>(Opts.SampleRate) * 2 + 1;
  else
    AdjustedSampleRatePlusOne = 2;

  ThreadLocals.NextSampleCounter =
      (getRandomUnsigned32() % (AdjustedSampleRatePlusOne - 1)) + 1;

  State.GuardedPagePool = reinterpret_cast<uintptr_t>(GuardedPoolMemory);
  State.GuardedPagePoolEnd =
      reinterpret_cast<uintptr_t>(GuardedPoolMemory) + PoolBytesRequired;

  if (Opts.InstallForkHandlers)
    installAtFork();
}

void GuardedPoolAllocator::disable() { PoolMutex.lock(); }

void GuardedPoolAllocator::enable() { PoolMutex.unlock(); }

void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
                                   void *Arg) {
  uintptr_t Start = reinterpret_cast<uintptr_t>(Base);
  for (size_t i = 0; i < State.MaxSimultaneousAllocations; ++i) {
    const AllocationMetadata &Meta = Metadata[i];
    if (Meta.Addr && !Meta.IsDeallocated && Meta.Addr >= Start &&
        Meta.Addr < Start + Size)
      Cb(Meta.Addr, Meta.Size, Arg);
  }
}

void GuardedPoolAllocator::uninitTestOnly() {
  if (State.GuardedPagePool) {
    unmapMemory(reinterpret_cast<void *>(State.GuardedPagePool),
                State.GuardedPagePoolEnd - State.GuardedPagePool,
                kGwpAsanGuardPageName);
    State.GuardedPagePool = 0;
    State.GuardedPagePoolEnd = 0;
  }
  if (Metadata) {
    unmapMemory(Metadata, State.MaxSimultaneousAllocations * sizeof(*Metadata),
                kGwpAsanMetadataName);
    Metadata = nullptr;
  }
  if (FreeSlots) {
    unmapMemory(FreeSlots,
                State.MaxSimultaneousAllocations * sizeof(*FreeSlots),
                kGwpAsanFreeSlotsName);
    FreeSlots = nullptr;
  }
}

static uintptr_t getPageAddr(uintptr_t Ptr, uintptr_t PageSize) {
  return Ptr & ~(PageSize - 1);
}

void *GuardedPoolAllocator::allocate(size_t Size) {
  // GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled,
  // fall back to the supporting allocator.
  if (State.GuardedPagePoolEnd == 0)
    return nullptr;

  // Protect against recursion.
  if (ThreadLocals.RecursiveGuard)
    return nullptr;
  ScopedBoolean SB(ThreadLocals.RecursiveGuard);

  if (Size == 0 || Size > State.maximumAllocationSize())
    return nullptr;

  size_t Index;
  {
    ScopedLock L(PoolMutex);
    Index = reserveSlot();
  }

  if (Index == kInvalidSlotID)
    return nullptr;

  uintptr_t Ptr = State.slotToAddr(Index);
  Ptr += allocationSlotOffset(Size);
  AllocationMetadata *Meta = addrToMetadata(Ptr);

  // If a slot is multiple pages in size, and the allocation takes up a single
  // page, we can improve overflow detection by leaving the unused pages as
  // unmapped.
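  // Only the pages spanned by this allocation are remapped read/write below;
  // the slot's remaining pages and the surrounding guard pages stay
  // inaccessible, so an access that runs past the mapped region faults
  // immediately.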
  markReadWrite(reinterpret_cast<void *>(getPageAddr(Ptr, State.PageSize)),
                Size, kGwpAsanAliveSlotName);

  Meta->RecordAllocation(Ptr, Size);
  Meta->AllocationTrace.RecordBacktrace(Backtrace);

  return reinterpret_cast<void *>(Ptr);
}

void GuardedPoolAllocator::trapOnAddress(uintptr_t Address, Error E) {
  State.FailureType = E;
  State.FailureAddress = Address;

  // Raise a SEGV by touching the first guard page.
  volatile char *p = reinterpret_cast<char *>(State.GuardedPagePool);
  *p = 0;
  __builtin_unreachable();
}

void GuardedPoolAllocator::stop() {
  ThreadLocals.RecursiveGuard = true;
  PoolMutex.tryLock();
}

void GuardedPoolAllocator::deallocate(void *Ptr) {
  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
  uintptr_t UPtr = reinterpret_cast<uintptr_t>(Ptr);
  size_t Slot = State.getNearestSlot(UPtr);
  uintptr_t SlotStart = State.slotToAddr(Slot);
  AllocationMetadata *Meta = addrToMetadata(UPtr);
  if (Meta->Addr != UPtr) {
    // If multiple errors occur at the same time, use the first one.
    ScopedLock L(PoolMutex);
    trapOnAddress(UPtr, Error::INVALID_FREE);
  }

  // Intentionally scope the mutex here, so that other threads can access the
  // pool during the expensive markInaccessible() call.
  {
    ScopedLock L(PoolMutex);
    if (Meta->IsDeallocated) {
      trapOnAddress(UPtr, Error::DOUBLE_FREE);
    }

    // Ensure that the deallocation is recorded before marking the page as
    // inaccessible. Otherwise, a racy use-after-free will have inconsistent
    // metadata.
    Meta->RecordDeallocation();

    // Ensure that the unwinder is not called if the recursive flag is set,
    // otherwise non-reentrant unwinders may deadlock.
    if (!ThreadLocals.RecursiveGuard) {
      ScopedBoolean B(ThreadLocals.RecursiveGuard);
      Meta->DeallocationTrace.RecordBacktrace(Backtrace);
    }
  }

  markInaccessible(reinterpret_cast<void *>(SlotStart),
                   State.maximumAllocationSize(), kGwpAsanGuardPageName);

  // And finally, lock again to release the slot back into the pool.
  ScopedLock L(PoolMutex);
  freeSlot(Slot);
}

size_t GuardedPoolAllocator::getSize(const void *Ptr) {
  assert(pointerIsMine(Ptr));
  ScopedLock L(PoolMutex);
  AllocationMetadata *Meta = addrToMetadata(reinterpret_cast<uintptr_t>(Ptr));
  assert(Meta->Addr == reinterpret_cast<uintptr_t>(Ptr));
  return Meta->Size;
}

AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const {
  return &Metadata[State.getNearestSlot(Ptr)];
}

size_t GuardedPoolAllocator::reserveSlot() {
  // Avoid potential reuse of a slot before we have made at least a single
  // allocation in each slot. Helps with our use-after-free detection.
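  // Slots are therefore handed out sequentially until every slot has hosted
  // at least one allocation; only after that do we start drawing randomly
  // from the free-slots queue.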
  if (NumSampledAllocations < State.MaxSimultaneousAllocations)
    return NumSampledAllocations++;

  if (FreeSlotsLength == 0)
    return kInvalidSlotID;

  size_t ReservedIndex = getRandomUnsigned32() % FreeSlotsLength;
  size_t SlotIndex = FreeSlots[ReservedIndex];
  FreeSlots[ReservedIndex] = FreeSlots[--FreeSlotsLength];
  return SlotIndex;
}

void GuardedPoolAllocator::freeSlot(size_t SlotIndex) {
  assert(FreeSlotsLength < State.MaxSimultaneousAllocations);
  FreeSlots[FreeSlotsLength++] = SlotIndex;
}

uintptr_t GuardedPoolAllocator::allocationSlotOffset(size_t Size) const {
  assert(Size > 0);

  bool ShouldRightAlign = getRandomUnsigned32() % 2 == 0;
  if (!ShouldRightAlign)
    return 0;

  uintptr_t Offset = State.maximumAllocationSize();
  if (!PerfectlyRightAlign) {
    // Round the size up to 4, 8, or a multiple of 16 so that the right-aligned
    // allocation still satisfies natural alignment for common types, at the
    // cost of a small unguarded gap between the allocation and the guard page.
    if (Size == 3)
      Size = 4;
    else if (Size > 4 && Size <= 8)
      Size = 8;
    else if (Size > 8 && (Size % 16) != 0)
      Size += 16 - (Size % 16);
  }
  Offset -= Size;
  return Offset;
}

GWP_ASAN_TLS_INITIAL_EXEC
GuardedPoolAllocator::ThreadLocalPackedVariables
    GuardedPoolAllocator::ThreadLocals;
} // namespace gwp_asan