//===-- guarded_pool_allocator.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "gwp_asan/guarded_pool_allocator.h"

#include "gwp_asan/crash_handler.h"
#include "gwp_asan/options.h"
#include "gwp_asan/utilities.h"

#include <assert.h>
#include <stddef.h>

using AllocationMetadata = gwp_asan::AllocationMetadata;
using Error = gwp_asan::Error;

namespace gwp_asan {
namespace {
// Pointer to the singleton version of this class. Set during initialisation,
// it allows the signal handler to find the allocator in order to deduce the
// root cause of failures. It must not be referenced by users outside this
// translation unit, in order to avoid the init-order fiasco.
GuardedPoolAllocator *SingletonPtr = nullptr;

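// Illustrative note: the helpers below assume that Boundary/PageSize is a
// power of two. For example, roundUpTo(0x1001, 0x1000) == 0x2000 and
// getPageAddr(0x1234, 0x1000) == 0x1000.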
size_t roundUpTo(size_t Size, size_t Boundary) {
  return (Size + Boundary - 1) & ~(Boundary - 1);
}

uintptr_t getPageAddr(uintptr_t Ptr, uintptr_t PageSize) {
  return Ptr & ~(PageSize - 1);
}

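// Note: this check also returns true for X == 0, which is not a power of two;
// callers that care (getRequiredBackingSize, alignUp, alignDown) assert a
// non-zero Alignment separately.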
bool isPowerOfTwo(uintptr_t X) { return (X & (X - 1)) == 0; }
} // anonymous namespace

// Gets the singleton implementation of this class. Thread-compatible until
// init() is called, thread-safe afterwards.
GuardedPoolAllocator *GuardedPoolAllocator::getSingleton() {
  return SingletonPtr;
}

void GuardedPoolAllocator::init(const options::Options &Opts) {
  // Note: We return early from init() here if GWP-ASan is not enabled. This
  // stops heap-allocation of class members, as well as mmap() of the guarded
  // slots.
  if (!Opts.Enabled || Opts.SampleRate == 0 ||
      Opts.MaxSimultaneousAllocations == 0)
    return;

  check(Opts.SampleRate >= 0, "GWP-ASan Error: SampleRate is < 0.");
  check(Opts.SampleRate < (1 << 30), "GWP-ASan Error: SampleRate is >= 2^30.");
  check(Opts.MaxSimultaneousAllocations >= 0,
        "GWP-ASan Error: MaxSimultaneousAllocations is < 0.");

  check(SingletonPtr == nullptr,
        "There's already a live GuardedPoolAllocator!");
  SingletonPtr = this;
  Backtrace = Opts.Backtrace;

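  // Stamp the shared state with the version magic bytes and version number,
  // presumably so that an out-of-process crash handler can check that it is
  // reading a compatible layout of the allocator state.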
  State.VersionMagic = {{AllocatorVersionMagic::kAllocatorVersionMagic[0],
                         AllocatorVersionMagic::kAllocatorVersionMagic[1],
                         AllocatorVersionMagic::kAllocatorVersionMagic[2],
                         AllocatorVersionMagic::kAllocatorVersionMagic[3]},
                        AllocatorVersionMagic::kAllocatorVersion,
                        0};

  State.MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations;

  const size_t PageSize = getPlatformPageSize();
  // getPageAddr() and roundUpTo() assume the page size to be a power of 2.
  assert((PageSize & (PageSize - 1)) == 0);
  State.PageSize = PageSize;

  // Number of pages required =
  //  + MaxSimultaneousAllocations * maximumAllocationSize (N pages per slot)
  //  + MaxSimultaneousAllocations (one guard on the left side of each slot)
  //  + 1 (an extra guard page at the end of the pool, on the right side)
  //  + 1 (an extra page that's used for reporting internally-detected crashes,
  //       like double free and invalid free, to the signal handler; see
  //       raiseInternallyDetectedError() for more info)
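  // For example (illustrative, assuming the common case of one page per
  // slot): MaxSimultaneousAllocations == 16 with 4 KiB pages reserves
  // 16 slot pages + 16 left guards + 1 right guard + 1 internal-fault page
  // = 34 pages (136 KiB) of address space.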
  size_t PoolBytesRequired =
      PageSize * (2 + State.MaxSimultaneousAllocations) +
      State.MaxSimultaneousAllocations * State.maximumAllocationSize();
  assert(PoolBytesRequired % PageSize == 0);
  void *GuardedPoolMemory = reserveGuardedPool(PoolBytesRequired);

  size_t BytesRequired =
      roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata), PageSize);
  Metadata = reinterpret_cast<AllocationMetadata *>(
      map(BytesRequired, kGwpAsanMetadataName));

  // Allocate memory and set up the free-slots list.
  BytesRequired = roundUpTo(
      State.MaxSimultaneousAllocations * sizeof(*FreeSlots), PageSize);
  FreeSlots =
      reinterpret_cast<size_t *>(map(BytesRequired, kGwpAsanFreeSlotsName));

  // Multiply the sample rate by 2 to give a good, fast approximation for (1 /
  // SampleRate) chance of sampling.
  if (Opts.SampleRate != 1)
    AdjustedSampleRatePlusOne = static_cast<uint32_t>(Opts.SampleRate) * 2 + 1;
  else
    AdjustedSampleRatePlusOne = 2;

  initPRNG();
  getThreadLocals()->NextSampleCounter =
      ((getRandomUnsigned32() % (AdjustedSampleRatePlusOne - 1)) + 1) &
      ThreadLocalPackedVariables::NextSampleCounterMask;
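  // Illustrative note: for SampleRate S > 1, AdjustedSampleRatePlusOne is
  // 2 * S + 1, so the counter above is drawn uniformly from [1, 2 * S]. Its
  // mean is roughly S, so on average about one in every S allocations gets
  // sampled into the guarded pool.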

  State.GuardedPagePool = reinterpret_cast<uintptr_t>(GuardedPoolMemory);
  State.GuardedPagePoolEnd =
      reinterpret_cast<uintptr_t>(GuardedPoolMemory) + PoolBytesRequired;

  if (Opts.InstallForkHandlers)
    installAtFork();
}

void GuardedPoolAllocator::disable() {
  PoolMutex.lock();
  BacktraceMutex.lock();
}

void GuardedPoolAllocator::enable() {
  PoolMutex.unlock();
  BacktraceMutex.unlock();
}

void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
                                   void *Arg) {
  uintptr_t Start = reinterpret_cast<uintptr_t>(Base);
  for (size_t i = 0; i < State.MaxSimultaneousAllocations; ++i) {
    const AllocationMetadata &Meta = Metadata[i];
    if (Meta.Addr && !Meta.IsDeallocated && Meta.Addr >= Start &&
        Meta.Addr < Start + Size)
      Cb(Meta.Addr, Meta.RequestedSize, Arg);
  }
}

void GuardedPoolAllocator::uninitTestOnly() {
  if (State.GuardedPagePool) {
    unreserveGuardedPool();
    State.GuardedPagePool = 0;
    State.GuardedPagePoolEnd = 0;
  }
  if (Metadata) {
    unmap(Metadata,
          roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata),
                    State.PageSize));
    Metadata = nullptr;
  }
  if (FreeSlots) {
    unmap(FreeSlots,
          roundUpTo(State.MaxSimultaneousAllocations * sizeof(*FreeSlots),
                    State.PageSize));
    FreeSlots = nullptr;
  }
  *getThreadLocals() = ThreadLocalPackedVariables();
  SingletonPtr = nullptr;
}

// Note: the minimum backing allocation size in GWP-ASan is always one page,
// and each slot may span multiple pages (always in page increments). Thus, for
// any request whose alignment is at most the page size, we don't need to
// allocate extra padding to ensure the alignment can be met.
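// For example (illustrative): with a 4 KiB page size and a requested 16 KiB
// alignment, an 8 KiB allocation needs 8 KiB + 16 KiB - 4 KiB = 20 KiB of
// backing, so that a 16 KiB-aligned start address is guaranteed to exist
// within the page-aligned backing region.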
size_t GuardedPoolAllocator::getRequiredBackingSize(size_t Size,
                                                    size_t Alignment,
                                                    size_t PageSize) {
  assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
  assert(Alignment != 0 && "Alignment should be non-zero");
  assert(Size != 0 && "Size should be non-zero");

  if (Alignment <= PageSize)
    return Size;

  return Size + Alignment - PageSize;
}

uintptr_t GuardedPoolAllocator::alignUp(uintptr_t Ptr, size_t Alignment) {
  assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
  assert(Alignment != 0 && "Alignment should be non-zero");
  if ((Ptr & (Alignment - 1)) == 0)
    return Ptr;

  Ptr += Alignment - (Ptr & (Alignment - 1));
  return Ptr;
}

uintptr_t GuardedPoolAllocator::alignDown(uintptr_t Ptr, size_t Alignment) {
  assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
  assert(Alignment != 0 && "Alignment should be non-zero");
  if ((Ptr & (Alignment - 1)) == 0)
    return Ptr;

  Ptr -= Ptr & (Alignment - 1);
  return Ptr;
}

void *GuardedPoolAllocator::allocate(size_t Size, size_t Alignment) {
  // GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled, fall
  // back to the supporting allocator.
  if (State.GuardedPagePoolEnd == 0) {
    getThreadLocals()->NextSampleCounter =
        (AdjustedSampleRatePlusOne - 1) &
        ThreadLocalPackedVariables::NextSampleCounterMask;
    return nullptr;
  }

  if (Size == 0)
    Size = 1;
  if (Alignment == 0)
    Alignment = alignof(max_align_t);

  if (!isPowerOfTwo(Alignment) || Alignment > State.maximumAllocationSize() ||
      Size > State.maximumAllocationSize())
    return nullptr;

  size_t BackingSize = getRequiredBackingSize(Size, Alignment, State.PageSize);
  if (BackingSize > State.maximumAllocationSize())
    return nullptr;

  // Protect against recursion.
  if (getThreadLocals()->RecursiveGuard)
    return nullptr;
  ScopedRecursiveGuard SRG;

  size_t Index;
  {
    ScopedLock L(PoolMutex);
    Index = reserveSlot();
  }

  if (Index == kInvalidSlotID)
    return nullptr;

  uintptr_t SlotStart = State.slotToAddr(Index);
  AllocationMetadata *Meta = addrToMetadata(SlotStart);
  uintptr_t SlotEnd = State.slotToAddr(Index) + State.maximumAllocationSize();
  uintptr_t UserPtr;
  // Randomly choose whether to left-align or right-align the allocation, and
  // then apply the necessary adjustments to get an aligned pointer.
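  // Right-aligned allocations end at (or near) the top of the slot, adjacent
  // to the guard page that follows it, improving the odds that a buffer
  // overflow faults immediately; left-aligned allocations start at the bottom
  // of the slot and do the same for buffer underflows.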
  if (getRandomUnsigned32() % 2 == 0)
    UserPtr = alignUp(SlotStart, Alignment);
  else
    UserPtr = alignDown(SlotEnd - Size, Alignment);

  assert(UserPtr >= SlotStart);
  assert(UserPtr + Size <= SlotEnd);

  // If a slot is multiple pages in size and the allocation only occupies some
  // of them, we can improve overflow detection by leaving the unused pages
  // unmapped.
  const size_t PageSize = State.PageSize;
  allocateInGuardedPool(
      reinterpret_cast<void *>(getPageAddr(UserPtr, PageSize)),
      roundUpTo(Size, PageSize));

  Meta->RecordAllocation(UserPtr, Size);
  {
    ScopedLock UL(BacktraceMutex);
    Meta->AllocationTrace.RecordBacktrace(Backtrace);
  }

  return reinterpret_cast<void *>(UserPtr);
}

void GuardedPoolAllocator::raiseInternallyDetectedError(uintptr_t Address,
                                                        Error E) {
  // Disable the allocator before setting the internal failure state. In
  // non-recoverable mode, the allocator will be permanently disabled, and so
  // things will be accessed without locks.
  disable();

  // Races between internally- and externally-raised faults can happen. Right
  // now, in this thread we've locked the allocator in order to raise an
  // internally-detected fault, and another thread could SIGSEGV to raise an
  // externally-detected fault. What will happen is that the other thread will
  // wait in the signal handler, as we hold the allocator's locks from the
  // disable() above. We'll trigger the signal handler by touching the
  // internal-signal-raising address below, and the signal handler from our
  // thread will get to run first as we will continue to hold the allocator
  // locks until the enable() at the end of this function. Be careful though:
  // if this thread receives another SIGSEGV after the disable() above, but
  // before touching the internal-signal-raising address below, then this
  // thread will get an "externally-raised" SIGSEGV while *also* holding the
  // allocator locks, which means this thread's signal handler will deadlock.
  // This could be resolved with a re-entrant lock, but asking platforms to
  // implement this seems unnecessary given the only way to get a SIGSEGV in
  // this critical section is either a memory safety bug in the couple of lines
  // of code below (be careful!), or someone outside using
  // `kill(this_thread, SIGSEGV)`, which really shouldn't happen.

  State.FailureType = E;
  State.FailureAddress = Address;

  // Raise a SEGV by touching a specific address that identifies to the crash
  // handler that this is an internally-raised fault. Changing this address?
  // Don't forget to update __gwp_asan_get_internal_crash_address.
  volatile char *p =
      reinterpret_cast<char *>(State.internallyDetectedErrorFaultAddress());
  *p = 0;

  // This should never be reached in non-recoverable mode. Ensure that the
  // signal handler called handleRecoverablePostCrashReport(), which was
  // responsible for re-setting these fields.
  assert(State.FailureType == Error::UNKNOWN);
  assert(State.FailureAddress == 0u);

  // In recoverable mode, the signal handler (after dumping the crash) marked
  // the page containing the InternalFaultSegvAddress as readable/writable, to
  // allow the second touch to succeed after returning from the signal handler.
  // Now, we need to mark the page as inaccessible again, so future internal
  // faults can be raised.
  deallocateInGuardedPool(
      reinterpret_cast<void *>(getPageAddr(
          State.internallyDetectedErrorFaultAddress(), State.PageSize)),
      State.PageSize);

  // Now that we're done patching ourselves back up, re-enable the allocator.
  enable();
}

void GuardedPoolAllocator::deallocate(void *Ptr) {
  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
  uintptr_t UPtr = reinterpret_cast<uintptr_t>(Ptr);
  size_t Slot = State.getNearestSlot(UPtr);
  uintptr_t SlotStart = State.slotToAddr(Slot);
  AllocationMetadata *Meta = addrToMetadata(UPtr);

  // If this allocation is responsible for a crash, never recycle it. Turn the
  // deallocate() call into a no-op.
  if (Meta->HasCrashed)
    return;

  if (Meta->Addr != UPtr) {
    raiseInternallyDetectedError(UPtr, Error::INVALID_FREE);
    return;
  }
  if (Meta->IsDeallocated) {
    raiseInternallyDetectedError(UPtr, Error::DOUBLE_FREE);
    return;
  }

  // Intentionally scope the mutex here, so that other threads can access the
  // pool during the expensive deallocateInGuardedPool() call below.
  {
    ScopedLock L(PoolMutex);

    // Ensure that the deallocation is recorded before marking the page as
    // inaccessible. Otherwise, a racy use-after-free will have inconsistent
    // metadata.
    Meta->RecordDeallocation();

    // Ensure that the unwinder is not called if the recursive flag is set,
    // otherwise non-reentrant unwinders may deadlock.
    if (!getThreadLocals()->RecursiveGuard) {
      ScopedRecursiveGuard SRG;
      ScopedLock UL(BacktraceMutex);
      Meta->DeallocationTrace.RecordBacktrace(Backtrace);
    }
  }

  deallocateInGuardedPool(reinterpret_cast<void *>(SlotStart),
                          State.maximumAllocationSize());

  // And finally, lock again to release the slot back into the pool.
  ScopedLock L(PoolMutex);
  freeSlot(Slot);
}


// Thread-compatible, protected by PoolMutex.
static bool PreviousRecursiveGuard;

void GuardedPoolAllocator::preCrashReport(void *Ptr) {
  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
  uintptr_t InternalCrashAddr = __gwp_asan_get_internal_crash_address(
      &State, reinterpret_cast<uintptr_t>(Ptr));
  if (!InternalCrashAddr)
    disable();

  // If something in the signal handler calls malloc() while dumping the
  // GWP-ASan report (e.g. backtrace_symbols()), make sure that GWP-ASan doesn't
  // service that allocation. `PreviousRecursiveGuard` is protected by the
  // allocator locks taken in disable(), either explicitly above for
  // externally-raised errors, or implicitly in raiseInternallyDetectedError()
  // for internally-detected errors.
  PreviousRecursiveGuard = getThreadLocals()->RecursiveGuard;
  getThreadLocals()->RecursiveGuard = true;
}

void GuardedPoolAllocator::postCrashReportRecoverableOnly(void *SignalPtr) {
  uintptr_t SignalUPtr = reinterpret_cast<uintptr_t>(SignalPtr);
  uintptr_t InternalCrashAddr =
      __gwp_asan_get_internal_crash_address(&State, SignalUPtr);
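  // Note: `a ?: b` below is the GCC/Clang conditional-with-omitted-operand
  // extension, equivalent to `a ? a : b`: prefer the internal crash address
  // when one was detected, otherwise fall back to the faulting signal address.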
  uintptr_t ErrorUptr = InternalCrashAddr ?: SignalUPtr;

  AllocationMetadata *Metadata = addrToMetadata(ErrorUptr);
  Metadata->HasCrashed = true;

  allocateInGuardedPool(
      reinterpret_cast<void *>(getPageAddr(SignalUPtr, State.PageSize)),
      State.PageSize);

  // Clear the internal state in order to not confuse the crash handler if a
  // use-after-free or buffer-overflow comes from a different allocation in the
  // future.
  if (InternalCrashAddr) {
    State.FailureType = Error::UNKNOWN;
    State.FailureAddress = 0;
  }

  size_t Slot = State.getNearestSlot(ErrorUptr);
  // If the slot is currently in the free list, remove it permanently so that
  // this crashed allocation is never recycled.
  for (size_t i = 0; i < FreeSlotsLength; ++i) {
    if (FreeSlots[i] == Slot) {
      FreeSlots[i] = FreeSlots[FreeSlotsLength - 1];
      FreeSlotsLength -= 1;
      break;
    }
  }

  getThreadLocals()->RecursiveGuard = PreviousRecursiveGuard;
  if (!InternalCrashAddr)
    enable();
}

size_t GuardedPoolAllocator::getSize(const void *Ptr) {
  assert(pointerIsMine(Ptr));
  ScopedLock L(PoolMutex);
  AllocationMetadata *Meta = addrToMetadata(reinterpret_cast<uintptr_t>(Ptr));
  assert(Meta->Addr == reinterpret_cast<uintptr_t>(Ptr));
  return Meta->RequestedSize;
}

AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const {
  return &Metadata[State.getNearestSlot(Ptr)];
}

size_t GuardedPoolAllocator::reserveSlot() {
  // Avoid potential reuse of a slot before we have made at least a single
  // allocation in each slot. Helps with our use-after-free detection.
  if (NumSampledAllocations < State.MaxSimultaneousAllocations)
    return NumSampledAllocations++;

  if (FreeSlotsLength == 0)
    return kInvalidSlotID;

  size_t ReservedIndex = getRandomUnsigned32() % FreeSlotsLength;
  size_t SlotIndex = FreeSlots[ReservedIndex];
  FreeSlots[ReservedIndex] = FreeSlots[--FreeSlotsLength];
  return SlotIndex;
}

void GuardedPoolAllocator::freeSlot(size_t SlotIndex) {
  assert(FreeSlotsLength < State.MaxSimultaneousAllocations);
  FreeSlots[FreeSlotsLength++] = SlotIndex;
}

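// The PRNG below is the classic 32-bit xorshift generator (shift constants
// 13/17/5, as described by Marsaglia); it is fast and good enough for the
// sampling decisions here, but is not cryptographically secure.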
uint32_t GuardedPoolAllocator::getRandomUnsigned32() {
  uint32_t RandomState = getThreadLocals()->RandomState;
  RandomState ^= RandomState << 13;
  RandomState ^= RandomState >> 17;
  RandomState ^= RandomState << 5;
  getThreadLocals()->RandomState = RandomState;
  return RandomState;
}
} // namespace gwp_asan