//===- Allocator.h - Simple memory allocation abstraction -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the BumpPtrAllocator interface. BumpPtrAllocator conforms
/// to the LLVM "Allocator" concept and is similar to MallocAllocator, but
/// objects cannot be deallocated. Their lifetime is tied to the lifetime of
/// the allocator.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ALLOCATOR_H
#define LLVM_SUPPORT_ALLOCATOR_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AllocatorBase.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <utility>

namespace llvm {

namespace detail {

// We call out to an external function to actually print the message as the
// printing code uses Allocator.h in its implementation.
void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
                                size_t TotalMemory);

} // end namespace detail

/// Allocate memory in an ever growing pool, as if by bump-pointer.
///
/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
/// memory rather than relying on a boundless contiguous heap. However, it has
/// bump-pointer semantics in that it is a monotonically growing pool of memory
/// where every allocation is found by merely allocating the next N bytes in
/// the slab, or the next N bytes in the next slab.
///
/// Note that this also has a threshold for forcing allocations above a certain
/// size into their own slab.
///
/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
/// object, which wraps malloc, to allocate memory, but it can be changed to
/// use a custom allocator.
///
/// The GrowthDelay specifies after how many allocated slabs the allocator
/// increases the size of the slabs.
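///
/// For illustration, a minimal usage sketch with the default configuration
/// (everything used here is declared in this file or the standard library):
/// \code
///   BumpPtrAllocator Alloc;
///   // Uninitialized storage for four ints; freed only with the allocator.
///   int *Ints = Alloc.Allocate<int>(4);
///   // Objects can be constructed via the placement operator new declared at
///   // the end of this file; their destructors are never run automatically.
///   auto *P = new (Alloc) std::pair<int, int>(1, 2);
///   Alloc.Reset(); // Recycles all memory; outstanding pointers go stale.
/// \endcode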
template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
          size_t SizeThreshold = SlabSize, size_t GrowthDelay = 128>
class BumpPtrAllocatorImpl
    : public AllocatorBase<BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold, GrowthDelay>>,
      private detail::AllocatorHolder<AllocatorT> {
  using AllocTy = detail::AllocatorHolder<AllocatorT>;

public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");
  static_assert(GrowthDelay > 0,
                "GrowthDelay must be at least 1, which already increases the "
                "slab size after each allocated slab.");

  BumpPtrAllocatorImpl() = default;

  template <typename T>
  BumpPtrAllocatorImpl(T &&Allocator)
      : AllocTy(std::forward<T>(Allocator)) {}

  // Manually implement a move constructor as we must clear the old
  // allocator's slabs as a matter of correctness.
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
      : AllocTy(std::move(Old.getAllocator())), CurPtr(Old.CurPtr),
        End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }

  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }

  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    RedZoneSize = RHS.RedZoneSize;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
    AllocTy::operator=(std::move(RHS.getAllocator()));

    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }

  /// Deallocate all but the first slab and reset the bump pointer to the
  /// beginning of it, freeing all memory allocated so far.
  void Reset() {
    // Deallocate all but the first slab, and deallocate all custom-sized
    // slabs.
    DeallocateCustomSizedSlabs();
    CustomSizedSlabs.clear();

    if (Slabs.empty())
      return;

    // Reset the state.
    BytesAllocated = 0;
    CurPtr = (char *)Slabs.front();
    End = CurPtr + SlabSize;

    __asan_poison_memory_region(*Slabs.begin(), computeSlabSize(0));
    DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
    Slabs.erase(std::next(Slabs.begin()), Slabs.end());
  }

  /// Allocate space at the specified alignment.
  // This method is *not* marked noalias, because
  // SpecificBumpPtrAllocator::DestroyAll() loops over all allocations, and
  // that loop is not based on the Allocate() return value.
  //
  // Allocate(0, N) is valid: it returns a non-null pointer (which should not
  // be dereferenced).
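  //
  // As an illustrative sketch of the bump-pointer behavior (offsets assume
  // the default 4096-byte slabs, no ASan red zone, and a freshly constructed
  // allocator, whose first call takes the slow path and starts slab 0):
  //
  //   BumpPtrAllocator A;
  //   void *P0 = A.Allocate(16, Align(8)); // starts slab 0 at its base
  //   void *P1 = A.Allocate(16, Align(8)); // P1 == (char *)P0 + 16
  //   void *P2 = A.Allocate(0, Align(1));  // valid and non-null; do not
  //                                        // dereference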
  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size, Align Alignment) {
    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    size_t Adjustment = offsetToAlignedAddr(CurPtr, Alignment);
    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");

    size_t SizeToAllocate = Size;
#if LLVM_ADDRESS_SANITIZER_BUILD
    // Add trailing bytes as a "red zone" under ASan.
    SizeToAllocate += RedZoneSize;
#endif

    // Check if we have enough space.
    if (LLVM_LIKELY(Adjustment + SizeToAllocate <= size_t(End - CurPtr)
                    // We can't return nullptr even for a zero-sized allocation!
                    && CurPtr != nullptr)) {
      char *AlignedPtr = CurPtr + Adjustment;
      CurPtr = AlignedPtr + SizeToAllocate;
      // Update the allocation point of this memory block in MemorySanitizer.
      // Without this, MemorySanitizer messages for values originating here
      // will point to the allocation of the entire slab.
      __msan_allocated_memory(AlignedPtr, Size);
      // Similarly, tell ASan about this space.
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    return AllocateSlow(Size, SizeToAllocate, Alignment);
  }

  LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_NOINLINE void *
  AllocateSlow(size_t Size, size_t SizeToAllocate, Align Alignment) {
    // If Size is really big, allocate a separate slab for it.
    size_t PaddedSize = SizeToAllocate + Alignment.value() - 1;
    if (PaddedSize > SizeThreshold) {
      void *NewSlab =
          this->getAllocator().Allocate(PaddedSize, alignof(std::max_align_t));
      // We own the new slab and don't want anyone reading anything other than
      // pieces returned from this method. So poison the whole slab.
      __asan_poison_memory_region(NewSlab, PaddedSize);
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
      char *AlignedPtr = (char *)AlignedAddr;
      __msan_allocated_memory(AlignedPtr, Size);
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // Otherwise, start a new slab and try again.
    StartNewSlab();
    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
    assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&
           "Unable to allocate memory!");
    char *AlignedPtr = (char *)AlignedAddr;
    CurPtr = AlignedPtr + SizeToAllocate;
    __msan_allocated_memory(AlignedPtr, Size);
    __asan_unpoison_memory_region(AlignedPtr, Size);
    return AlignedPtr;
  }

  inline LLVM_ATTRIBUTE_RETURNS_NONNULL void *
  Allocate(size_t Size, size_t Alignment) {
    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");
    return Allocate(Size, Align(Alignment));
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;

  // Bump pointer allocators are expected to never free their storage, and
  // clients expect pointers to remain valid for non-dereferencing uses even
  // after deallocation.
  void Deallocate(const void *Ptr, size_t Size, size_t /*Alignment*/) {
    __asan_poison_memory_region(Ptr, Size);
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;

  size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }

  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator.
  /// The returned value is negative iff the object is inside a custom-sized
  /// slab.
  /// Returns an empty optional if the pointer is not found in the allocator.
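  ///
  /// For illustration, a sketch of the index scheme (offsets assume two
  /// standard slabs of the default 4096-byte size):
  /// \code
  ///   // Ptr at offset 100 into slab 0        -> returns 100
  ///   // Ptr at offset 100 into slab 1        -> returns 4096 + 100
  ///   // Ptr at offset 100 into custom slab 0 -> returns -1 - 100
  ///   // Ptr not owned by this allocator      -> returns std::nullopt
  /// \endcode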
  std::optional<int64_t> identifyObject(const void *Ptr) {
    const char *P = static_cast<const char *>(Ptr);
    int64_t InSlabIdx = 0;
    for (size_t Idx = 0, E = Slabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(Slabs[Idx]);
      if (P >= S && P < S + computeSlabSize(Idx))
        return InSlabIdx + static_cast<int64_t>(P - S);
      InSlabIdx += static_cast<int64_t>(computeSlabSize(Idx));
    }

    // Use a negative index to denote custom-sized slabs.
    int64_t InCustomSizedSlabIdx = -1;
    for (size_t Idx = 0, E = CustomSizedSlabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(CustomSizedSlabs[Idx].first);
      size_t Size = CustomSizedSlabs[Idx].second;
      if (P >= S && P < S + Size)
        return InCustomSizedSlabIdx - static_cast<int64_t>(P - S);
      InCustomSizedSlabIdx -= static_cast<int64_t>(Size);
    }
    return std::nullopt;
  }

  /// A wrapper around identifyObject that additionally asserts that
  /// the object is indeed within the allocator.
  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator.
  int64_t identifyKnownObject(const void *Ptr) {
    std::optional<int64_t> Out = identifyObject(Ptr);
    assert(Out && "Wrong allocator used");
    return *Out;
  }

  /// A wrapper around identifyKnownObject. Accepts type information
  /// about the object and produces a smaller identifier by relying on
  /// the alignment information. Note that sub-classes may have different
  /// alignment, so the base-most class should be passed as the template
  /// parameter in order to obtain correct results. For that reason automatic
  /// template parameter deduction is disabled.
  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator. This identifier is
  /// different from the ones produced by identifyObject and
  /// identifyKnownObject.
  template <typename T>
  int64_t identifyKnownAlignedObject(const void *Ptr) {
    int64_t Out = identifyKnownObject(Ptr);
    assert(Out % alignof(T) == 0 && "Wrong alignment information");
    return Out / alignof(T);
  }

  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (const auto &PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }

  size_t getBytesAllocated() const { return BytesAllocated; }

  void setRedZoneSize(size_t NewSize) { RedZoneSize = NewSize; }

  void PrintStats() const {
    detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
                                       getTotalMemory());
  }

private:
  /// The current pointer into the current slab.
  ///
  /// This points to the next free byte in the slab.
  char *CurPtr = nullptr;

  /// The end of the current slab.
  char *End = nullptr;

  /// The slabs allocated so far.
  SmallVector<void *, 4> Slabs;

  /// Custom-sized slabs allocated for too-large allocation requests.
  SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;

  /// How many bytes we've allocated.
  ///
  /// Used so that we can compute how much space was wasted.
  size_t BytesAllocated = 0;

  /// The number of bytes to put between allocations when running under
  /// a sanitizer.
  size_t RedZoneSize = 1;

  static size_t computeSlabSize(unsigned SlabIdx) {
    // Scale the actual allocated slab size based on the number of slabs
    // allocated. Every GrowthDelay slabs allocated, we double
    // the allocated size to reduce allocation frequency, but saturate at
    // multiplying the slab size by 2^30.
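    //
    // As an illustrative worked example with the default parameters
    // (SlabSize = 4096, GrowthDelay = 128): slabs 0..127 are 4096 bytes,
    // slabs 128..255 are 8192 bytes, slabs 256..383 are 16384 bytes, and so
    // on, capped at SlabSize * 2^30.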
    return SlabSize *
           ((size_t)1 << std::min<size_t>(30, SlabIdx / GrowthDelay));
  }

  /// Allocate a new slab and move the bump pointers over into the new
  /// slab, modifying CurPtr and End.
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());

    void *NewSlab = this->getAllocator().Allocate(AllocatedSlabSize,
                                                  alignof(std::max_align_t));
    // We own the new slab and don't want anyone reading anything other than
    // pieces returned from this method. So poison the whole slab.
    __asan_poison_memory_region(NewSlab, AllocatedSlabSize);

    Slabs.push_back(NewSlab);
    CurPtr = (char *)(NewSlab);
    End = ((char *)NewSlab) + AllocatedSlabSize;
  }

  /// Deallocate a sequence of slabs.
  void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
                       SmallVectorImpl<void *>::iterator E) {
    for (; I != E; ++I) {
      size_t AllocatedSlabSize =
          computeSlabSize(std::distance(Slabs.begin(), I));
      this->getAllocator().Deallocate(*I, AllocatedSlabSize,
                                      alignof(std::max_align_t));
    }
  }

  /// Deallocate all memory for custom-sized slabs.
  void DeallocateCustomSizedSlabs() {
    for (auto &PtrAndSize : CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      this->getAllocator().Deallocate(Ptr, Size, alignof(std::max_align_t));
    }
  }

  template <typename T> friend class SpecificBumpPtrAllocator;
};

/// The standard BumpPtrAllocator which just uses the default template
/// parameters.
typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;

/// A BumpPtrAllocator that allows only elements of a specific type to be
/// allocated.
///
/// This allows calling the destructor in DestroyAll() and when the allocator
/// is destroyed.
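///
/// For illustration, a minimal usage sketch (the Node type is hypothetical):
/// \code
///   struct Node { int Value; ~Node() { /* release resources */ } };
///   SpecificBumpPtrAllocator<Node> Alloc;
///   Node *N = new (Alloc.Allocate()) Node{42};
///   // ~Node() runs for every allocated element on DestroyAll() or when
///   // Alloc itself is destroyed.
/// \endcode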
template <typename T> class SpecificBumpPtrAllocator {
  BumpPtrAllocator Allocator;

public:
  SpecificBumpPtrAllocator() {
    // Because SpecificBumpPtrAllocator walks the memory to call destructors,
    // it can't have red zones between allocations.
    Allocator.setRedZoneSize(0);
  }
  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
      : Allocator(std::move(Old.Allocator)) {}
  ~SpecificBumpPtrAllocator() { DestroyAll(); }

  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
    Allocator = std::move(RHS.Allocator);
    return *this;
  }

  /// Call the destructor of each allocated object, then deallocate all but
  /// the first slab and reset the bump pointer to the beginning of it,
  /// freeing all memory allocated so far.
  void DestroyAll() {
    auto DestroyElements = [](char *Begin, char *End) {
      assert(Begin == (char *)alignAddr(Begin, Align::Of<T>()));
      for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T *>(Ptr)->~T();
    };

    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char *Begin = (char *)alignAddr(*I, Align::Of<T>());
      char *End = *I == Allocator.Slabs.back()
                      ? Allocator.CurPtr
                      : (char *)*I + AllocatedSlabSize;

      DestroyElements(Begin, End);
    }

    for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements((char *)alignAddr(Ptr, Align::Of<T>()),
                      (char *)Ptr + Size);
    }

    Allocator.Reset();
  }

  /// Allocate space for an array of objects without constructing them.
  T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
};

} // end namespace llvm

/// Enable placement-new expressions of the form `new (Allocator) T(...)` on
/// any BumpPtrAllocatorImpl.
template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
          size_t GrowthDelay>
void *
operator new(size_t Size,
             llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold,
                                        GrowthDelay> &Allocator) {
  return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size),
                                           alignof(std::max_align_t)));
}

/// The matching placement delete, invoked only if a constructor throws during
/// a placement-new expression. It intentionally does nothing: bump allocators
/// never free individual allocations.
template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
          size_t GrowthDelay>
void operator delete(void *,
                     llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold, GrowthDelay> &) {
}

#endif // LLVM_SUPPORT_ALLOCATOR_H