xref: /llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp (revision 0dcf3324cfb4429e85b54e857f9bb86f423ffc5e)
//===-- guarded_pool_allocator.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "gwp_asan/guarded_pool_allocator.h"

#include "gwp_asan/options.h"
#include "gwp_asan/utilities.h"

#include <assert.h>

using AllocationMetadata = gwp_asan::AllocationMetadata;
using Error = gwp_asan::Error;

namespace gwp_asan {
namespace {
// Pointer to the singleton version of this class. Instantiated during
// initialization, this allows the signal handler to find this class in order
// to deduce the root cause of failures. Must not be referenced by users
// outside this translation unit, in order to avoid init-order-fiasco.
GuardedPoolAllocator *SingletonPtr = nullptr;

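// Round Size up to the nearest multiple of Boundary. Boundary must be a
// power of two for the mask arithmetic below to be correct.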
size_t roundUpTo(size_t Size, size_t Boundary) {
  return (Size + Boundary - 1) & ~(Boundary - 1);
}

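// Return the address of the page containing Ptr. PageSize must be a power of
// two.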
uintptr_t getPageAddr(uintptr_t Ptr, uintptr_t PageSize) {
  return Ptr & ~(PageSize - 1);
}
} // anonymous namespace

// Gets the singleton implementation of this class. Thread-compatible until
// init() is called, thread-safe afterwards.
GuardedPoolAllocator *GuardedPoolAllocator::getSingleton() {
  return SingletonPtr;
}

void GuardedPoolAllocator::init(const options::Options &Opts) {
  // Note: We return early from init() here if GWP-ASan is not enabled. This
  // stops the mapping of the metadata and free-slot arrays, as well as the
  // mmap() of the guarded slots themselves.
  if (!Opts.Enabled || Opts.SampleRate == 0 ||
      Opts.MaxSimultaneousAllocations == 0)
    return;

  Check(Opts.SampleRate >= 0, "GWP-ASan Error: SampleRate is < 0.");
  Check(Opts.SampleRate < (1 << 30), "GWP-ASan Error: SampleRate is >= 2^30.");
  Check(Opts.MaxSimultaneousAllocations >= 0,
        "GWP-ASan Error: MaxSimultaneousAllocations is < 0.");

  SingletonPtr = this;
  Backtrace = Opts.Backtrace;

  State.MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations;

  const size_t PageSize = getPlatformPageSize();
  // getPageAddr() and roundUpTo() assume the page size to be a power of 2.
  assert((PageSize & (PageSize - 1)) == 0);
  State.PageSize = PageSize;

  PerfectlyRightAlign = Opts.PerfectlyRightAlign;

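  // The pool layout alternates guard pages and slots: one leading guard page,
  // then a maximally-sized slot followed by a guard page for each possible
  // allocation, giving (N + 1) guard pages and N slots overall.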
  size_t PoolBytesRequired =
      PageSize * (1 + State.MaxSimultaneousAllocations) +
      State.MaxSimultaneousAllocations * State.maximumAllocationSize();
  assert(PoolBytesRequired % PageSize == 0);
  void *GuardedPoolMemory = reserveGuardedPool(PoolBytesRequired);

  size_t BytesRequired =
      roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata), PageSize);
  Metadata = reinterpret_cast<AllocationMetadata *>(
      map(BytesRequired, kGwpAsanMetadataName));

  // Allocate memory and set up the free slots list.
  BytesRequired = roundUpTo(
      State.MaxSimultaneousAllocations * sizeof(*FreeSlots), PageSize);
  FreeSlots =
      reinterpret_cast<size_t *>(map(BytesRequired, kGwpAsanFreeSlotsName));

  // Multiply the sample rate by 2 to give a good, fast approximation of a
  // (1 / SampleRate) chance of sampling each allocation.
  if (Opts.SampleRate != 1)
    AdjustedSampleRatePlusOne = static_cast<uint32_t>(Opts.SampleRate) * 2 + 1;
  else
    AdjustedSampleRatePlusOne = 2;

  initPRNG();
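  // Draw the first sample counter uniformly from [1, 2 * SampleRate]. Its
  // mean is close to SampleRate, so roughly one in every SampleRate
  // allocations gets sampled.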
  getThreadLocals()->NextSampleCounter =
      ((getRandomUnsigned32() % (AdjustedSampleRatePlusOne - 1)) + 1) &
      ThreadLocalPackedVariables::NextSampleCounterMask;

  State.GuardedPagePool = reinterpret_cast<uintptr_t>(GuardedPoolMemory);
  State.GuardedPagePoolEnd =
      reinterpret_cast<uintptr_t>(GuardedPoolMemory) + PoolBytesRequired;

  if (Opts.InstallForkHandlers)
    installAtFork();
}

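// Acquire both mutexes so that no thread can allocate, deallocate, or record
// a backtrace until enable() is called, e.g. around fork() to keep the pool
// consistent in the child.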
void GuardedPoolAllocator::disable() {
  PoolMutex.lock();
  BacktraceMutex.lock();
}

void GuardedPoolAllocator::enable() {
  PoolMutex.unlock();
  BacktraceMutex.unlock();
}

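// Invokes Cb on every live (not yet deallocated) allocation whose address
// lies within [Base, Base + Size). Expected to be called while the allocator
// is disable()d, so that the metadata is stable.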
void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
                                   void *Arg) {
  uintptr_t Start = reinterpret_cast<uintptr_t>(Base);
  for (size_t i = 0; i < State.MaxSimultaneousAllocations; ++i) {
    const AllocationMetadata &Meta = Metadata[i];
    if (Meta.Addr && !Meta.IsDeallocated && Meta.Addr >= Start &&
        Meta.Addr < Start + Size)
      Cb(Meta.Addr, Meta.Size, Arg);
  }
}

void GuardedPoolAllocator::uninitTestOnly() {
  if (State.GuardedPagePool) {
    unreserveGuardedPool();
    State.GuardedPagePool = 0;
    State.GuardedPagePoolEnd = 0;
  }
  if (Metadata) {
    unmap(Metadata,
          roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata),
                    State.PageSize));
    Metadata = nullptr;
  }
  if (FreeSlots) {
    unmap(FreeSlots,
          roundUpTo(State.MaxSimultaneousAllocations * sizeof(*FreeSlots),
                    State.PageSize));
    FreeSlots = nullptr;
  }
  *getThreadLocals() = ThreadLocalPackedVariables();
}

void *GuardedPoolAllocator::allocate(size_t Size) {
  // GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled,
  // fall back to the supporting allocator.
  if (State.GuardedPagePoolEnd == 0) {
    getThreadLocals()->NextSampleCounter =
        (AdjustedSampleRatePlusOne - 1) &
        ThreadLocalPackedVariables::NextSampleCounterMask;
    return nullptr;
  }

  // Protect against recursive calls into GWP-ASan on the same thread.
  if (getThreadLocals()->RecursiveGuard)
    return nullptr;
  ScopedRecursiveGuard SRG;

  if (Size == 0 || Size > State.maximumAllocationSize())
    return nullptr;

  size_t Index;
  {
    ScopedLock L(PoolMutex);
    Index = reserveSlot();
  }

  if (Index == kInvalidSlotID)
    return nullptr;

  uintptr_t Ptr = State.slotToAddr(Index);
  // Should we right-align this allocation?
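  // Right-aligned allocations end flush against the guard page that follows
  // the slot, so buffer overflows fault immediately; left-aligned allocations
  // catch underflows instead. Choosing randomly covers both bug classes.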
  if (getRandomUnsigned32() % 2 == 0) {
    AlignmentStrategy Align = AlignmentStrategy::DEFAULT;
    if (PerfectlyRightAlign)
      Align = AlignmentStrategy::PERFECT;
    Ptr +=
        State.maximumAllocationSize() - rightAlignedAllocationSize(Size, Align);
  }
  AllocationMetadata *Meta = addrToMetadata(Ptr);

  // If a slot is multiple pages in size, and the allocation takes up a single
  // page, we can improve overflow detection by leaving the unused pages
  // unmapped.
  const size_t PageSize = State.PageSize;
  allocateInGuardedPool(reinterpret_cast<void *>(getPageAddr(Ptr, PageSize)),
                        roundUpTo(Size, PageSize));

  Meta->RecordAllocation(Ptr, Size);
  {
    ScopedLock UL(BacktraceMutex);
    Meta->AllocationTrace.RecordBacktrace(Backtrace);
  }

  return reinterpret_cast<void *>(Ptr);
}

void GuardedPoolAllocator::trapOnAddress(uintptr_t Address, Error E) {
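  // Record the failure type and address so that the installed signal handler
  // can report the root cause once the deliberate fault below is taken.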
  State.FailureType = E;
  State.FailureAddress = Address;

  // Raise a SEGV by touching the first guard page.
  volatile char *p = reinterpret_cast<char *>(State.GuardedPagePool);
  *p = 0;
  __builtin_unreachable();
}

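// Best-effort stop on the failing path: mark this thread as inside GWP-ASan
// and tryLock() the pool (rather than lock(), which could deadlock if this
// thread already holds it) so no further allocations are served.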
void GuardedPoolAllocator::stop() {
  getThreadLocals()->RecursiveGuard = true;
  PoolMutex.tryLock();
}

void GuardedPoolAllocator::deallocate(void *Ptr) {
  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
  uintptr_t UPtr = reinterpret_cast<uintptr_t>(Ptr);
  size_t Slot = State.getNearestSlot(UPtr);
  uintptr_t SlotStart = State.slotToAddr(Slot);
  AllocationMetadata *Meta = addrToMetadata(UPtr);
  if (Meta->Addr != UPtr) {
    // If multiple errors occur at the same time, use the first one.
    ScopedLock L(PoolMutex);
    trapOnAddress(UPtr, Error::INVALID_FREE);
  }

  // Intentionally scope the mutex here, so that other threads can access the
  // pool during the expensive deallocateInGuardedPool() call below.
  {
    ScopedLock L(PoolMutex);
    if (Meta->IsDeallocated) {
      trapOnAddress(UPtr, Error::DOUBLE_FREE);
    }

    // Ensure that the deallocation is recorded before marking the page as
    // inaccessible. Otherwise, a racy use-after-free will have inconsistent
    // metadata.
    Meta->RecordDeallocation();

    // Ensure that the unwinder is not called if the recursive flag is set,
    // otherwise non-reentrant unwinders may deadlock.
    if (!getThreadLocals()->RecursiveGuard) {
      ScopedRecursiveGuard SRG;
      ScopedLock UL(BacktraceMutex);
      Meta->DeallocationTrace.RecordBacktrace(Backtrace);
    }
  }

  deallocateInGuardedPool(reinterpret_cast<void *>(SlotStart),
                          State.maximumAllocationSize());

  // And finally, lock again to release the slot back into the pool.
  ScopedLock L(PoolMutex);
  freeSlot(Slot);
}

size_t GuardedPoolAllocator::getSize(const void *Ptr) {
  assert(pointerIsMine(Ptr));
  ScopedLock L(PoolMutex);
  AllocationMetadata *Meta = addrToMetadata(reinterpret_cast<uintptr_t>(Ptr));
  assert(Meta->Addr == reinterpret_cast<uintptr_t>(Ptr));
  return Meta->Size;
}

AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const {
  return &Metadata[State.getNearestSlot(Ptr)];
}

size_t GuardedPoolAllocator::reserveSlot() {
  // Avoid potential reuse of a slot before we have made at least a single
  // allocation in each slot. Helps with our use-after-free detection.
  if (NumSampledAllocations < State.MaxSimultaneousAllocations)
    return NumSampledAllocations++;

  if (FreeSlotsLength == 0)
    return kInvalidSlotID;

  size_t ReservedIndex = getRandomUnsigned32() % FreeSlotsLength;
  size_t SlotIndex = FreeSlots[ReservedIndex];
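  // O(1) removal: overwrite the chosen entry with the last element and shrink
  // the array, since the order of free slots doesn't matter.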
  FreeSlots[ReservedIndex] = FreeSlots[--FreeSlotsLength];
  return SlotIndex;
}

void GuardedPoolAllocator::freeSlot(size_t SlotIndex) {
  assert(FreeSlotsLength < State.MaxSimultaneousAllocations);
  FreeSlots[FreeSlotsLength++] = SlotIndex;
}

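// Per-thread xorshift32 PRNG (Marsaglia's 13/17/5 shift triple). Fast, with
// only one word of state, which is all the sampling logic needs; not suitable
// for cryptographic use.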
uint32_t GuardedPoolAllocator::getRandomUnsigned32() {
  uint32_t RandomState = getThreadLocals()->RandomState;
  RandomState ^= RandomState << 13;
  RandomState ^= RandomState >> 17;
  RandomState ^= RandomState << 5;
  getThreadLocals()->RandomState = RandomState;
  return RandomState;
}
} // namespace gwp_asan