//===- Allocator.h - Simple memory allocation abstraction -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the BumpPtrAllocator interface. BumpPtrAllocator conforms
/// to the LLVM "Allocator" concept and is similar to MallocAllocator, but
/// objects cannot be deallocated. Their lifetime is tied to the lifetime of the
/// allocator.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ALLOCATOR_H
#define LLVM_SUPPORT_ALLOCATOR_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AllocatorBase.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <utility>

namespace llvm {

namespace detail {

// We call out to an external function to actually print the message as the
// printing code uses Allocator.h in its implementation.
void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
                                size_t TotalMemory);

} // end namespace detail

/// Allocate memory in an ever growing pool, as if by bump-pointer.
///
/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
/// memory rather than relying on a boundless contiguous heap. However, it has
/// bump-pointer semantics in that it is a monotonically growing pool of memory
/// where every allocation is found by merely allocating the next N bytes in
/// the slab, or the next N bytes in the next slab.
///
/// Note that this also has a threshold for forcing allocations above a certain
/// size into their own slab.
///
/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
/// object, which wraps malloc, to allocate memory, but it can be changed to
/// use a custom allocator.
///
/// The GrowthDelay specifies after how many allocated slabs the allocator
/// increases the size of the slabs.
template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
          size_t SizeThreshold = SlabSize, size_t GrowthDelay = 128>
class BumpPtrAllocatorImpl
    : public AllocatorBase<BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold, GrowthDelay>>,
      private detail::AllocatorHolder<AllocatorT> {
  using AllocTy = detail::AllocatorHolder<AllocatorT>;

public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");
  static_assert(GrowthDelay > 0,
                "GrowthDelay must be at least 1, which already increases the "
                "slab size after each allocated slab.");

  BumpPtrAllocatorImpl() = default;

  template <typename T>
  BumpPtrAllocatorImpl(T &&Allocator)
      : AllocTy(std::forward<T &&>(Allocator)) {}
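  // Typical usage, as an illustrative sketch (BumpPtrAllocator, defined later
  // in this file, is the default instantiation of this template):
  //   llvm::BumpPtrAllocator A;
  //   int *P = A.Allocate<int>(4); // typed overload from AllocatorBase
  //   A.Reset();                   // frees all memory allocated so far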
  // Manually implement a move constructor as we must clear the old allocator's
  // slabs as a matter of correctness.
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
      : AllocTy(std::move(Old.getAllocator())), CurPtr(Old.CurPtr),
        End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }

  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }

  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    RedZoneSize = RHS.RedZoneSize;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
    AllocTy::operator=(std::move(RHS.getAllocator()));

    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }

  /// Deallocate all but the current slab and reset the current pointer
  /// to the beginning of it, freeing all memory allocated so far.
  void Reset() {
    // Deallocate all but the first slab, and deallocate all custom-sized slabs.
    DeallocateCustomSizedSlabs();
    CustomSizedSlabs.clear();

    if (Slabs.empty())
      return;

    // Reset the state.
    BytesAllocated = 0;
    CurPtr = (char *)Slabs.front();
    End = CurPtr + SlabSize;

    __asan_poison_memory_region(*Slabs.begin(), computeSlabSize(0));
    DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
    Slabs.erase(std::next(Slabs.begin()), Slabs.end());
  }

  /// Allocate space at the specified alignment.
  // This method is *not* marked noalias, because
  // SpecificBumpPtrAllocator::DestroyAll() loops over all allocations, and
  // that loop is not based on the Allocate() return value.
  //
  // Allocate(0, N) is valid; it returns a non-null pointer (which should not
  // be dereferenced).
  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size, Align Alignment) {
    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    uintptr_t AlignedPtr = alignAddr(CurPtr, Alignment);

    size_t SizeToAllocate = Size;
#if LLVM_ADDRESS_SANITIZER_BUILD
    // Add trailing bytes as a "red zone" under ASan.
    SizeToAllocate += RedZoneSize;
#endif

    uintptr_t AllocEndPtr = AlignedPtr + SizeToAllocate;
    assert(AllocEndPtr >= uintptr_t(CurPtr) &&
           "Alignment + Size must not overflow");

    // Check if we have enough space.
    if (LLVM_LIKELY(AllocEndPtr <= uintptr_t(End)
                    // We can't return nullptr even for a zero-sized allocation!
                    && CurPtr != nullptr)) {
      CurPtr = reinterpret_cast<char *>(AllocEndPtr);
      // Update the allocation point of this memory block in MemorySanitizer.
      // Without this, MemorySanitizer messages for values originated from here
      // will point to the allocation of the entire slab.
      __msan_allocated_memory(reinterpret_cast<char *>(AlignedPtr), Size);
      // Similarly, tell ASan about this space.
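      // Only the Size bytes actually handed out are unpoisoned here; any
      // trailing red-zone bytes added above stay poisoned so ASan can catch
      // overruns past the returned object.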
      __asan_unpoison_memory_region(reinterpret_cast<char *>(AlignedPtr), Size);
      return reinterpret_cast<char *>(AlignedPtr);
    }

    return AllocateSlow(Size, SizeToAllocate, Alignment);
  }

  LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_NOINLINE void *
  AllocateSlow(size_t Size, size_t SizeToAllocate, Align Alignment) {
    // If Size is really big, allocate a separate slab for it.
    size_t PaddedSize = SizeToAllocate + Alignment.value() - 1;
    if (PaddedSize > SizeThreshold) {
      void *NewSlab =
          this->getAllocator().Allocate(PaddedSize, alignof(std::max_align_t));
      // We own the new slab and don't want anyone reading anything other than
      // pieces returned from this method. So poison the whole slab.
      __asan_poison_memory_region(NewSlab, PaddedSize);
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
      char *AlignedPtr = (char *)AlignedAddr;
      __msan_allocated_memory(AlignedPtr, Size);
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // Otherwise, start a new slab and try again.
    StartNewSlab();
    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
    assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&
           "Unable to allocate memory!");
    char *AlignedPtr = (char *)AlignedAddr;
    CurPtr = AlignedPtr + SizeToAllocate;
    __msan_allocated_memory(AlignedPtr, Size);
    __asan_unpoison_memory_region(AlignedPtr, Size);
    return AlignedPtr;
  }

  inline LLVM_ATTRIBUTE_RETURNS_NONNULL void *
  Allocate(size_t Size, size_t Alignment) {
    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");
    return Allocate(Size, Align(Alignment));
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;

  // Bump pointer allocators are expected to never free their storage; and
  // clients expect pointers to remain valid for non-dereferencing uses even
  // after deallocation.
  void Deallocate(const void *Ptr, size_t Size, size_t /*Alignment*/) {
    __asan_poison_memory_region(Ptr, Size);
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;

  size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }

  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator.
  /// The returned value is negative iff the object is inside a custom-sized
  /// slab.
  /// Returns an empty optional if the pointer is not found in the allocator.
  std::optional<int64_t> identifyObject(const void *Ptr) {
    const char *P = static_cast<const char *>(Ptr);
    int64_t InSlabIdx = 0;
    for (size_t Idx = 0, E = Slabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(Slabs[Idx]);
      if (P >= S && P < S + computeSlabSize(Idx))
        return InSlabIdx + static_cast<int64_t>(P - S);
      InSlabIdx += static_cast<int64_t>(computeSlabSize(Idx));
    }

    // Use negative index to denote custom sized slabs.
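    // For example, with the default template parameters, a pointer 16 bytes
    // into the first regular slab is identified as 16, one 16 bytes into the
    // second regular slab as 4096 + 16, and one 16 bytes into the first
    // custom-sized slab as -1 - 16 = -17.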
    int64_t InCustomSizedSlabIdx = -1;
    for (size_t Idx = 0, E = CustomSizedSlabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(CustomSizedSlabs[Idx].first);
      size_t Size = CustomSizedSlabs[Idx].second;
      if (P >= S && P < S + Size)
        return InCustomSizedSlabIdx - static_cast<int64_t>(P - S);
      InCustomSizedSlabIdx -= static_cast<int64_t>(Size);
    }
    return std::nullopt;
  }

  /// A wrapper around identifyObject that additionally asserts that
  /// the object is indeed within the allocator.
  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator.
  int64_t identifyKnownObject(const void *Ptr) {
    std::optional<int64_t> Out = identifyObject(Ptr);
    assert(Out && "Wrong allocator used");
    return *Out;
  }

  /// A wrapper around identifyKnownObject. Accepts type information
  /// about the object and produces a smaller identifier by relying on
  /// the alignment information. Note that subclasses may have different
  /// alignment, so the most base class should be passed as the template
  /// parameter in order to obtain correct results. For that reason automatic
  /// template parameter deduction is disabled.
  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator. This identifier is
  /// different from the ones produced by identifyObject and
  /// identifyKnownObject.
  template <typename T>
  int64_t identifyKnownAlignedObject(const void *Ptr) {
    int64_t Out = identifyKnownObject(Ptr);
    assert(Out % alignof(T) == 0 && "Wrong alignment information");
    return Out / alignof(T);
  }

  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (const auto &PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }

  size_t getBytesAllocated() const { return BytesAllocated; }

  void setRedZoneSize(size_t NewSize) {
    RedZoneSize = NewSize;
  }

  void PrintStats() const {
    detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
                                       getTotalMemory());
  }

private:
  /// The current pointer into the current slab.
  ///
  /// This points to the next free byte in the slab.
  char *CurPtr = nullptr;

  /// The end of the current slab.
  char *End = nullptr;

  /// The slabs allocated so far.
  SmallVector<void *, 4> Slabs;

  /// Custom-sized slabs allocated for too-large allocation requests.
  SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;

  /// How many bytes we've allocated.
  ///
  /// Used so that we can compute how much space was wasted.
  size_t BytesAllocated = 0;

  /// The number of bytes to put between allocations when running under
  /// a sanitizer.
  size_t RedZoneSize = 1;

  static size_t computeSlabSize(unsigned SlabIdx) {
    // Scale the actual allocated slab size based on the number of slabs
    // allocated. Every GrowthDelay slabs allocated, we double
    // the allocated size to reduce allocation frequency, but saturate at
    // multiplying the slab size by 2^30.
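    // For example, with the defaults (SlabSize = 4096, GrowthDelay = 128),
    // slab indices 0-127 get 4096 bytes, indices 128-255 get 8192 bytes,
    // and so on.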
    return SlabSize *
           ((size_t)1 << std::min<size_t>(30, SlabIdx / GrowthDelay));
  }

  /// Allocate a new slab and move the bump pointers over into the new
  /// slab, modifying CurPtr and End.
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());

    void *NewSlab = this->getAllocator().Allocate(AllocatedSlabSize,
                                                  alignof(std::max_align_t));
    // We own the new slab and don't want anyone reading anything other than
    // pieces returned from this method. So poison the whole slab.
    __asan_poison_memory_region(NewSlab, AllocatedSlabSize);

    Slabs.push_back(NewSlab);
    CurPtr = (char *)(NewSlab);
    End = ((char *)NewSlab) + AllocatedSlabSize;
  }

  /// Deallocate a sequence of slabs.
  void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
                       SmallVectorImpl<void *>::iterator E) {
    for (; I != E; ++I) {
      size_t AllocatedSlabSize =
          computeSlabSize(std::distance(Slabs.begin(), I));
      this->getAllocator().Deallocate(*I, AllocatedSlabSize,
                                      alignof(std::max_align_t));
    }
  }

  /// Deallocate all memory for custom sized slabs.
  void DeallocateCustomSizedSlabs() {
    for (auto &PtrAndSize : CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      this->getAllocator().Deallocate(Ptr, Size, alignof(std::max_align_t));
    }
  }

  template <typename T> friend class SpecificBumpPtrAllocator;
};

/// The standard BumpPtrAllocator which just uses the default template
/// parameters.
typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;

/// A BumpPtrAllocator that allows only elements of a specific type to be
/// allocated.
///
/// This allows calling the destructor in DestroyAll() and when the allocator
/// is destroyed.
template <typename T> class SpecificBumpPtrAllocator {
  BumpPtrAllocator Allocator;

public:
  SpecificBumpPtrAllocator() {
    // Because SpecificBumpPtrAllocator walks the memory to call destructors,
    // it can't have red zones between allocations.
    Allocator.setRedZoneSize(0);
  }
  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
      : Allocator(std::move(Old.Allocator)) {}
  ~SpecificBumpPtrAllocator() { DestroyAll(); }

  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
    Allocator = std::move(RHS.Allocator);
    return *this;
  }

  /// Call the destructor of each allocated object and deallocate all but the
  /// current slab and reset the current pointer to the beginning of it,
  /// freeing all memory allocated so far.
  void DestroyAll() {
    auto DestroyElements = [](char *Begin, char *End) {
      assert(Begin == (char *)alignAddr(Begin, Align::Of<T>()));
      for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T *>(Ptr)->~T();
    };

    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char *Begin = (char *)alignAddr(*I, Align::Of<T>());
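      // For the slab currently being filled, only objects up to CurPtr were
      // ever handed out, so stop destroying there rather than at the slab's
      // full capacity.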
      char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
                                               : (char *)*I + AllocatedSlabSize;

      DestroyElements(Begin, End);
    }

    for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements((char *)alignAddr(Ptr, Align::Of<T>()),
                      (char *)Ptr + Size);
    }

    Allocator.Reset();
  }

  /// Allocate space for an array of objects without constructing them.
  T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }

  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator.
  /// Returns an empty optional if the pointer is not found in the allocator.
  std::optional<int64_t> identifyObject(const void *Ptr) {
    return Allocator.identifyObject(Ptr);
  }
};

} // end namespace llvm

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
          size_t GrowthDelay>
void *
operator new(size_t Size,
             llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold,
                                        GrowthDelay> &Allocator) {
  return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size),
                                           alignof(std::max_align_t)));
}

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
          size_t GrowthDelay>
void operator delete(void *,
                     llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold, GrowthDelay> &) {
}

#endif // LLVM_SUPPORT_ALLOCATOR_H