xref: /llvm-project/libcxx/src/memory_resource.cpp (revision e99c4906e44ae3f921fa05356909d006cda8d954)
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include <cstddef>
#include <memory>
#include <memory_resource>

#if _LIBCPP_HAS_ATOMIC_HEADER
#  include <atomic>
#elif !defined(_LIBCPP_HAS_NO_THREADS)
#  include <mutex>
#  if defined(__ELF__) && defined(_LIBCPP_LINK_PTHREAD_LIB)
#    pragma comment(lib, "pthread")
#  endif
#endif

_LIBCPP_BEGIN_NAMESPACE_STD

namespace pmr {

// memory_resource

memory_resource::~memory_resource() = default;

// new_delete_resource()

#if !_LIBCPP_HAS_ALIGNED_ALLOCATION
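// Without aligned operator new we cannot request over-aligned storage from the
// underlying allocator directly, so allocations are verified after the fact.
// std::align leaves the pointer unchanged exactly when it is already suitably
// aligned, which makes it a convenient alignment probe here.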
static bool is_aligned_to(void* ptr, size_t align) {
  void* p2     = ptr;
  size_t space = 1;
  void* result = std::align(align, 1, p2, space);
  return (result == ptr);
}
#endif

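// The resource behind new_delete_resource(): every request is forwarded to the
// library's aligned allocation entry points, which ultimately call the global
// operator new and operator delete.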
class _LIBCPP_EXPORTED_FROM_ABI __new_delete_memory_resource_imp : public memory_resource {
  void* do_allocate(size_t bytes, size_t align) override {
#if _LIBCPP_HAS_ALIGNED_ALLOCATION
    return std::__libcpp_allocate(bytes, align);
#else
    if (bytes == 0)
      bytes = 1;
    void* result = std::__libcpp_allocate(bytes, align);
    if (!is_aligned_to(result, align)) {
      std::__libcpp_deallocate(result, bytes, align);
      __throw_bad_alloc();
    }
    return result;
#endif
  }

  void do_deallocate(void* p, size_t bytes, size_t align) override { std::__libcpp_deallocate(p, bytes, align); }

  bool do_is_equal(const memory_resource& other) const noexcept override { return &other == this; }
};

// null_memory_resource()

class _LIBCPP_EXPORTED_FROM_ABI __null_memory_resource_imp : public memory_resource {
  void* do_allocate(size_t, size_t) override { __throw_bad_alloc(); }
  void do_deallocate(void*, size_t, size_t) override {}
  bool do_is_equal(const memory_resource& other) const noexcept override { return &other == this; }
};

namespace {

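// Wrapping the two singleton resources in a union keeps their destructors from
// ever running: ~ResourceInitHelper deliberately destroys nothing, so the
// resources remain usable even while other objects with static storage
// duration are being torn down and may still allocate through them.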
union ResourceInitHelper {
  struct {
    __new_delete_memory_resource_imp new_delete_res;
    __null_memory_resource_imp null_res;
  } resources;
  char dummy;
  constexpr ResourceInitHelper() : resources() {}
  ~ResourceInitHelper() {}
};

// Pretend we're inside a system header so the compiler doesn't flag the use of the init_priority
// attribute with a value that's reserved for the implementation (we're the implementation).
#include "memory_resource_init_helper.h"

} // namespace

memory_resource* new_delete_resource() noexcept { return &res_init.resources.new_delete_res; }

memory_resource* null_memory_resource() noexcept { return &res_init.resources.null_res; }
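// Usage sketch (illustrative, not part of this file): null_memory_resource()
// is commonly installed as an upstream to forbid heap fallback, e.g.
//
//   char buf[1024];
//   std::pmr::monotonic_buffer_resource mr{buf, sizeof(buf), std::pmr::null_memory_resource()};
//
// Allocations that outgrow `buf` then throw bad_alloc instead of reaching the heap.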

// default_memory_resource()

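// The getter and the setter are folded into one function so they can share a
// single function-local static, whichever synchronization primitives (atomics,
// a mutex, or none) are available on the target.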
static memory_resource* __default_memory_resource(bool set = false, memory_resource* new_res = nullptr) noexcept {
#if _LIBCPP_HAS_ATOMIC_HEADER
  static constinit atomic<memory_resource*> __res{&res_init.resources.new_delete_res};
  if (set) {
    new_res = new_res ? new_res : new_delete_resource();
    // TODO: Can a weaker ordering be used?
    return std::atomic_exchange_explicit(&__res, new_res, memory_order_acq_rel);
  } else {
    return std::atomic_load_explicit(&__res, memory_order_acquire);
  }
#elif !defined(_LIBCPP_HAS_NO_THREADS)
  static constinit memory_resource* res = &res_init.resources.new_delete_res;
  static mutex res_lock;
  if (set) {
    new_res = new_res ? new_res : new_delete_resource();
    lock_guard<mutex> guard(res_lock);
    memory_resource* old_res = res;
    res                      = new_res;
    return old_res;
  } else {
    lock_guard<mutex> guard(res_lock);
    return res;
  }
#else
  static constinit memory_resource* res = &res_init.resources.new_delete_res;
  if (set) {
    new_res                  = new_res ? new_res : new_delete_resource();
    memory_resource* old_res = res;
    res                      = new_res;
    return old_res;
  } else {
    return res;
  }
#endif
}

memory_resource* get_default_resource() noexcept { return __default_memory_resource(); }

memory_resource* set_default_resource(memory_resource* __new_res) noexcept {
  return __default_memory_resource(true, __new_res);
}
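// Usage sketch (illustrative): since set_default_resource returns the previous
// resource, callers typically save and restore it around a region of code:
//
//   std::pmr::memory_resource* old = std::pmr::set_default_resource(std::pmr::null_memory_resource());
//   // ... code that must not allocate through the default resource ...
//   std::pmr::set_default_resource(old);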

// 23.12.5, mem.res.pool

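// Round `count` up to the next multiple of `alignment`, which is assumed to be
// a power of two; e.g. roundup(13, 8) == 16.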
static size_t roundup(size_t count, size_t alignment) {
  size_t mask = alignment - 1;
  return (count + mask) & ~mask;
}

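// Every upstream allocation made by the ad-hoc pool carries a footer at its
// end, and the footers form a singly linked list of all outstanding chunks.
// __allocation_size() recovers the total size originally requested from the
// upstream resource: the user bytes, any padding, and the footer itself.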
struct unsynchronized_pool_resource::__adhoc_pool::__chunk_footer {
  __chunk_footer* __next_;
  char* __start_;
  size_t __align_;
  size_t __allocation_size() { return (reinterpret_cast<char*>(this) - __start_) + sizeof(*this); }
};

void unsynchronized_pool_resource::__adhoc_pool::__release_ptr(memory_resource* upstream) {
  while (__first_ != nullptr) {
    __chunk_footer* next = __first_->__next_;
    upstream->deallocate(__first_->__start_, __first_->__allocation_size(), __first_->__align_);
    __first_ = next;
  }
}

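// Layout of an ad-hoc allocation: [user bytes | padding | __chunk_footer].
// The pointer handed back to the caller is the start of the allocation, so no
// adjustment is needed on the way out; the footer sits at the far end.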
void* unsynchronized_pool_resource::__adhoc_pool::__do_allocate(memory_resource* upstream, size_t bytes, size_t align) {
  const size_t footer_size  = sizeof(__chunk_footer);
  const size_t footer_align = alignof(__chunk_footer);

  if (align < footer_align)
    align = footer_align;

  size_t aligned_capacity = roundup(bytes, footer_align) + footer_size;

  void* result = upstream->allocate(aligned_capacity, align);

  __chunk_footer* h = (__chunk_footer*)((char*)result + aligned_capacity - footer_size);
  h->__next_        = __first_;
  h->__start_       = (char*)result;
  h->__align_       = align;
  __first_          = h;
  return result;
}

void unsynchronized_pool_resource::__adhoc_pool::__do_deallocate(
    memory_resource* upstream, void* p, size_t bytes, size_t align) {
  _LIBCPP_ASSERT_NON_NULL(__first_ != nullptr, "deallocating a block that was not allocated with this allocator");
  if (__first_->__start_ == p) {
    __chunk_footer* next = __first_->__next_;
    upstream->deallocate(p, __first_->__allocation_size(), __first_->__align_);
    __first_ = next;
  } else {
    for (__chunk_footer* h = __first_; h->__next_ != nullptr; h = h->__next_) {
      if (h->__next_->__start_ == p) {
        __chunk_footer* next = h->__next_->__next_;
        upstream->deallocate(p, h->__next_->__allocation_size(), h->__next_->__align_);
        h->__next_ = next;
        return;
      }
    }
    // The request to deallocate memory ends up being a no-op, likely resulting in a memory leak.
    _LIBCPP_ASSERT_VALID_DEALLOCATION(false, "deallocating a block that was not allocated with this allocator");
  }
}

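// A fixed pool serves blocks of a single size. Chunks obtained from the
// upstream resource are carved into equally sized blocks, and freed blocks are
// threaded onto an intrusive free list of __vacancy_header nodes stored inside
// the blocks themselves, so a vacancy costs no extra memory.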
class unsynchronized_pool_resource::__fixed_pool {
  struct __chunk_footer {
    __chunk_footer* __next_;
    char* __start_;
    size_t __align_;
    size_t __allocation_size() { return (reinterpret_cast<char*>(this) - __start_) + sizeof(*this); }
  };

  struct __vacancy_header {
    __vacancy_header* __next_vacancy_;
  };

  __chunk_footer* __first_chunk_     = nullptr;
  __vacancy_header* __first_vacancy_ = nullptr;

public:
  explicit __fixed_pool() = default;

  void __release_ptr(memory_resource* upstream) {
    __first_vacancy_ = nullptr;
    while (__first_chunk_ != nullptr) {
      __chunk_footer* next = __first_chunk_->__next_;
      upstream->deallocate(__first_chunk_->__start_, __first_chunk_->__allocation_size(), __first_chunk_->__align_);
      __first_chunk_ = next;
    }
  }

  void* __try_allocate_from_vacancies() {
    if (__first_vacancy_ != nullptr) {
      void* result     = __first_vacancy_;
      __first_vacancy_ = __first_vacancy_->__next_vacancy_;
      return result;
    }
    return nullptr;
  }

  void* __allocate_in_new_chunk(memory_resource* upstream, size_t block_size, size_t chunk_size) {
    _LIBCPP_ASSERT_INTERNAL(chunk_size % block_size == 0, "");
    static_assert(__default_alignment >= alignof(std::max_align_t), "");
    static_assert(__default_alignment >= alignof(__chunk_footer), "");
    static_assert(__default_alignment >= alignof(__vacancy_header), "");

    const size_t footer_size  = sizeof(__chunk_footer);
    const size_t footer_align = alignof(__chunk_footer);

    size_t aligned_capacity = roundup(chunk_size, footer_align) + footer_size;

    void* result = upstream->allocate(aligned_capacity, __default_alignment);

    __chunk_footer* h = (__chunk_footer*)((char*)result + aligned_capacity - footer_size);
    h->__next_        = __first_chunk_;
    h->__start_       = (char*)result;
    h->__align_       = __default_alignment;
    __first_chunk_    = h;

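    // The first block of the new chunk is returned to the caller; every other
    // block is pushed onto the vacancy list for later allocations.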
    if (chunk_size > block_size) {
      __vacancy_header* last_vh = this->__first_vacancy_;
      for (size_t i = block_size; i != chunk_size; i += block_size) {
        __vacancy_header* vh = (__vacancy_header*)((char*)result + i);
        vh->__next_vacancy_  = last_vh;
        last_vh              = vh;
      }
      this->__first_vacancy_ = last_vh;
    }
    return result;
  }

  void __evacuate(void* p) {
    __vacancy_header* vh = (__vacancy_header*)(p);
    vh->__next_vacancy_  = __first_vacancy_;
    __first_vacancy_     = vh;
  }

  size_t __previous_chunk_size_in_bytes() const { return __first_chunk_ ? __first_chunk_->__allocation_size() : 0; }

  static const size_t __default_alignment = alignof(max_align_t);
};

size_t unsynchronized_pool_resource::__pool_block_size(int i) const { return size_t(1) << __log2_pool_block_size(i); }

int unsynchronized_pool_resource::__log2_pool_block_size(int i) const { return (i + __log2_smallest_block_size); }

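// Map a (bytes, align) request to the index of the smallest fixed pool whose
// block size can hold it; requests that are too large or over-aligned get
// index __num_fixed_pools_, which routes them to the ad-hoc pool. The loop
// effectively computes ceil(log2(max(bytes, align))) relative to the smallest
// block size.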
int unsynchronized_pool_resource::__pool_index(size_t bytes, size_t align) const {
  if (align > alignof(std::max_align_t) || bytes > (size_t(1) << __num_fixed_pools_))
    return __num_fixed_pools_;
  else {
    int i = 0;
    bytes = (bytes > align) ? bytes : align;
    bytes -= 1;
    bytes >>= __log2_smallest_block_size;
    while (bytes != 0) {
      bytes >>= 1;
      i += 1;
    }
    return i;
  }
}

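// The constructor clamps the user-supplied pool_options into the ranges this
// implementation supports, then derives the number of fixed pools from the
// largest block size: one pool per power of two, starting at the smallest
// block size.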
unsynchronized_pool_resource::unsynchronized_pool_resource(const pool_options& opts, memory_resource* upstream)
    : __res_(upstream), __fixed_pools_(nullptr) {
  size_t largest_block_size;
  if (opts.largest_required_pool_block == 0)
    largest_block_size = __default_largest_block_size;
  else if (opts.largest_required_pool_block < __smallest_block_size)
    largest_block_size = __smallest_block_size;
  else if (opts.largest_required_pool_block > __max_largest_block_size)
    largest_block_size = __max_largest_block_size;
  else
    largest_block_size = opts.largest_required_pool_block;

  if (opts.max_blocks_per_chunk == 0)
    __options_max_blocks_per_chunk_ = __max_blocks_per_chunk;
  else if (opts.max_blocks_per_chunk < __min_blocks_per_chunk)
    __options_max_blocks_per_chunk_ = __min_blocks_per_chunk;
  else if (opts.max_blocks_per_chunk > __max_blocks_per_chunk)
    __options_max_blocks_per_chunk_ = __max_blocks_per_chunk;
  else
    __options_max_blocks_per_chunk_ = opts.max_blocks_per_chunk;

  __num_fixed_pools_ = 1;
  size_t capacity    = __smallest_block_size;
  while (capacity < largest_block_size) {
    capacity <<= 1;
    __num_fixed_pools_ += 1;
  }
}

pool_options unsynchronized_pool_resource::options() const {
  pool_options p;
  p.max_blocks_per_chunk        = __options_max_blocks_per_chunk_;
  p.largest_required_pool_block = __pool_block_size(__num_fixed_pools_ - 1);
  return p;
}

void unsynchronized_pool_resource::release() {
  __adhoc_pool_.__release_ptr(__res_);
  if (__fixed_pools_ != nullptr) {
    const int n = __num_fixed_pools_;
    for (int i = 0; i < n; ++i)
      __fixed_pools_[i].__release_ptr(__res_);
    __res_->deallocate(__fixed_pools_, __num_fixed_pools_ * sizeof(__fixed_pool), alignof(__fixed_pool));
    __fixed_pools_ = nullptr;
  }
}

void* unsynchronized_pool_resource::do_allocate(size_t bytes, size_t align) {
  // A pointer to allocated storage (6.6.4.4.1) with a size of at least bytes.
  // The size and alignment of the allocated memory shall meet the requirements for
  // a class derived from memory_resource (23.12).
  // If the pool selected for a block of size bytes is unable to satisfy the memory request
  // from its own internal data structures, it will call upstream_resource()->allocate()
  // to obtain more memory. If bytes is larger than that which the largest pool can handle,
  // then memory will be allocated using upstream_resource()->allocate().

  int i = __pool_index(bytes, align);
  if (i == __num_fixed_pools_)
    return __adhoc_pool_.__do_allocate(__res_, bytes, align);
  else {
    if (__fixed_pools_ == nullptr) {
      __fixed_pools_ =
          (__fixed_pool*)__res_->allocate(__num_fixed_pools_ * sizeof(__fixed_pool), alignof(__fixed_pool));
      __fixed_pool* first = __fixed_pools_;
      __fixed_pool* last  = __fixed_pools_ + __num_fixed_pools_;
      for (__fixed_pool* pool = first; pool != last; ++pool)
        ::new ((void*)pool) __fixed_pool;
    }
    void* result = __fixed_pools_[i].__try_allocate_from_vacancies();
    if (result == nullptr) {
      auto min = [](size_t a, size_t b) { return a < b ? a : b; };
      auto max = [](size_t a, size_t b) { return a < b ? b : a; };

      size_t prev_chunk_size_in_bytes  = __fixed_pools_[i].__previous_chunk_size_in_bytes();
      size_t prev_chunk_size_in_blocks = prev_chunk_size_in_bytes >> __log2_pool_block_size(i);

      size_t chunk_size_in_blocks;

      if (prev_chunk_size_in_blocks == 0) {
        size_t min_blocks_per_chunk = max(__min_bytes_per_chunk >> __log2_pool_block_size(i), __min_blocks_per_chunk);
        chunk_size_in_blocks        = min_blocks_per_chunk;
      } else {
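        // Grow geometrically: each new chunk is 25% larger (in blocks) than
        // the previous one, so repeated refills amortize calls to the
        // upstream resource.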
        static_assert(__max_bytes_per_chunk <= SIZE_MAX - (__max_bytes_per_chunk / 4), "unsigned overflow is possible");
        chunk_size_in_blocks = prev_chunk_size_in_blocks + (prev_chunk_size_in_blocks / 4);
      }

      size_t max_blocks_per_chunk =
          min((__max_bytes_per_chunk >> __log2_pool_block_size(i)),
              min(__max_blocks_per_chunk, __options_max_blocks_per_chunk_));
      if (chunk_size_in_blocks > max_blocks_per_chunk)
        chunk_size_in_blocks = max_blocks_per_chunk;

      size_t block_size = __pool_block_size(i);

      size_t chunk_size_in_bytes = (chunk_size_in_blocks << __log2_pool_block_size(i));
      result                     = __fixed_pools_[i].__allocate_in_new_chunk(__res_, block_size, chunk_size_in_bytes);
    }
    return result;
  }
}

void unsynchronized_pool_resource::do_deallocate(void* p, size_t bytes, size_t align) {
  // Returns the memory at p to the pool. It is unspecified if,
  // or under what circumstances, this operation will result in
  // a call to upstream_resource()->deallocate().

  int i = __pool_index(bytes, align);
  if (i == __num_fixed_pools_)
    return __adhoc_pool_.__do_deallocate(__res_, p, bytes, align);
  else {
    _LIBCPP_ASSERT_NON_NULL(
        __fixed_pools_ != nullptr, "deallocating a block that was not allocated with this allocator");
    __fixed_pools_[i].__evacuate(p);
  }
}

bool synchronized_pool_resource::do_is_equal(const memory_resource& other) const noexcept { return &other == this; }

// 23.12.6, mem.res.monotonic.buffer

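// The downward-growing counterpart of std::align: try to fit `size` bytes at
// the top of the region [ptr - space, ptr), aligning the result down to
// `align`, and return nullptr if the region cannot accommodate the request.
// On success, ptr and space are updated to reflect the carved-off storage.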
static void* align_down(size_t align, size_t size, void*& ptr, size_t& space) {
  if (size > space)
    return nullptr;

  char* p1      = static_cast<char*>(ptr);
  char* new_ptr = reinterpret_cast<char*>(reinterpret_cast<uintptr_t>(p1 - size) & ~(align - 1));

  if (new_ptr < (p1 - space))
    return nullptr;

  ptr = new_ptr;
  space -= p1 - new_ptr;

  return ptr;
}

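// Both the caller-supplied initial buffer and each upstream chunk hand out
// memory by moving __cur_ downward from the end of the region; nothing is
// returned to the upstream resource until the whole monotonic resource is
// released.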
void* monotonic_buffer_resource::__initial_descriptor::__try_allocate_from_chunk(size_t bytes, size_t align) {
  if (!__cur_)
    return nullptr;
  void* new_ptr       = static_cast<void*>(__cur_);
  size_t new_capacity = (__cur_ - __start_);
  void* aligned_ptr   = align_down(align, bytes, new_ptr, new_capacity);
  if (aligned_ptr != nullptr)
    __cur_ = static_cast<char*>(new_ptr);
  return aligned_ptr;
}

void* monotonic_buffer_resource::__chunk_footer::__try_allocate_from_chunk(size_t bytes, size_t align) {
  void* new_ptr       = static_cast<void*>(__cur_);
  size_t new_capacity = (__cur_ - __start_);
  void* aligned_ptr   = align_down(align, bytes, new_ptr, new_capacity);
  if (aligned_ptr != nullptr)
    __cur_ = static_cast<char*>(new_ptr);
  return aligned_ptr;
}

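// When the current chunk is exhausted and the request alone would not exceed
// the previous allocation, the new chunk doubles the previous usable capacity,
// so the number of upstream allocations grows only logarithmically with the
// total number of bytes handed out.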
void* monotonic_buffer_resource::do_allocate(size_t bytes, size_t align) {
  const size_t footer_size  = sizeof(__chunk_footer);
  const size_t footer_align = alignof(__chunk_footer);

  auto previous_allocation_size = [&]() {
    if (__chunks_ != nullptr)
      return __chunks_->__allocation_size();

    size_t newsize = (__initial_.__start_ != nullptr) ? (__initial_.__end_ - __initial_.__start_) : __initial_.__size_;

    return roundup(newsize, footer_align) + footer_size;
  };

  if (void* result = __initial_.__try_allocate_from_chunk(bytes, align))
    return result;
  if (__chunks_ != nullptr) {
    if (void* result = __chunks_->__try_allocate_from_chunk(bytes, align))
      return result;
  }

  // Allocate a brand-new chunk.

  if (align < footer_align)
    align = footer_align;

  size_t aligned_capacity  = roundup(bytes, footer_align) + footer_size;
  size_t previous_capacity = previous_allocation_size();

  if (aligned_capacity <= previous_capacity) {
    size_t newsize   = 2 * (previous_capacity - footer_size);
    aligned_capacity = roundup(newsize, footer_align) + footer_size;
  }

  char* start            = (char*)__res_->allocate(aligned_capacity, align);
  auto end               = start + aligned_capacity - footer_size;
  __chunk_footer* footer = (__chunk_footer*)(end);
  footer->__next_        = __chunks_;
  footer->__start_       = start;
  footer->__cur_         = end;
  footer->__align_       = align;
  __chunks_              = footer;

  return __chunks_->__try_allocate_from_chunk(bytes, align);
}

} // namespace pmr

_LIBCPP_END_NAMESPACE_STD