1*4bdff4beSrobert //===----------------------------------------------------------------------===//
2*4bdff4beSrobert //
3*4bdff4beSrobert // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4*4bdff4beSrobert // See https://llvm.org/LICENSE.txt for license information.
5*4bdff4beSrobert // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6*4bdff4beSrobert //
7*4bdff4beSrobert //===----------------------------------------------------------------------===//
8*4bdff4beSrobert
9*4bdff4beSrobert #include <memory>
10*4bdff4beSrobert #include <memory_resource>
11*4bdff4beSrobert
12*4bdff4beSrobert #ifndef _LIBCPP_HAS_NO_ATOMIC_HEADER
13*4bdff4beSrobert # include <atomic>
14*4bdff4beSrobert #elif !defined(_LIBCPP_HAS_NO_THREADS)
15*4bdff4beSrobert # include <mutex>
16*4bdff4beSrobert # if defined(__ELF__) && defined(_LIBCPP_LINK_PTHREAD_LIB)
17*4bdff4beSrobert # pragma comment(lib, "pthread")
18*4bdff4beSrobert # endif
19*4bdff4beSrobert #endif
20*4bdff4beSrobert
21*4bdff4beSrobert _LIBCPP_BEGIN_NAMESPACE_STD
22*4bdff4beSrobert
23*4bdff4beSrobert namespace pmr {
24*4bdff4beSrobert
25*4bdff4beSrobert // memory_resource
26*4bdff4beSrobert
// Defining the (defaulted) destructor here, out of line, anchors
// memory_resource's vtable and key function in this translation unit.
memory_resource::~memory_resource() = default;
28*4bdff4beSrobert
29*4bdff4beSrobert // new_delete_resource()
30*4bdff4beSrobert
31*4bdff4beSrobert #ifdef _LIBCPP_HAS_NO_ALIGNED_ALLOCATION
// Returns true if `ptr` already satisfies alignment `align`.
// Trick: std::align asked to fit a 1-byte object into a 1-byte window can only
// succeed without moving the pointer, so a non-null result equal to `ptr`
// means `ptr` was aligned to begin with.
static bool is_aligned_to(void* ptr, size_t align) {
  void* candidate   = ptr;
  size_t free_bytes = 1;
  return std::align(align, 1, candidate, free_bytes) == ptr;
}
38*4bdff4beSrobert #endif
39*4bdff4beSrobert
40*4bdff4beSrobert class _LIBCPP_TYPE_VIS __new_delete_memory_resource_imp : public memory_resource {
do_allocate(size_t bytes,size_t align)41*4bdff4beSrobert void* do_allocate(size_t bytes, size_t align) override {
42*4bdff4beSrobert #ifndef _LIBCPP_HAS_NO_ALIGNED_ALLOCATION
43*4bdff4beSrobert return std::__libcpp_allocate(bytes, align);
44*4bdff4beSrobert #else
45*4bdff4beSrobert if (bytes == 0)
46*4bdff4beSrobert bytes = 1;
47*4bdff4beSrobert void* result = std::__libcpp_allocate(bytes, align);
48*4bdff4beSrobert if (!is_aligned_to(result, align)) {
49*4bdff4beSrobert std::__libcpp_deallocate(result, bytes, align);
50*4bdff4beSrobert __throw_bad_alloc();
51*4bdff4beSrobert }
52*4bdff4beSrobert return result;
53*4bdff4beSrobert #endif
54*4bdff4beSrobert }
55*4bdff4beSrobert
do_deallocate(void * p,size_t bytes,size_t align)56*4bdff4beSrobert void do_deallocate(void* p, size_t bytes, size_t align) override { std::__libcpp_deallocate(p, bytes, align); }
57*4bdff4beSrobert
do_is_equal(const memory_resource & other) const58*4bdff4beSrobert bool do_is_equal(const memory_resource& other) const noexcept override { return &other == this; }
59*4bdff4beSrobert };
60*4bdff4beSrobert
61*4bdff4beSrobert // null_memory_resource()
62*4bdff4beSrobert
// memory_resource that refuses every request: do_allocate always throws
// bad_alloc and do_deallocate is (necessarily) a no-op.
class _LIBCPP_TYPE_VIS __null_memory_resource_imp : public memory_resource {
  void* do_allocate(size_t, size_t) override { __throw_bad_alloc(); }
  void do_deallocate(void*, size_t, size_t) override {}
  // Stateless resource: equal only to itself.
  bool do_is_equal(const memory_resource& other) const noexcept override { return &other == this; }
};
68*4bdff4beSrobert
69*4bdff4beSrobert namespace {
70*4bdff4beSrobert
// Holds the two singleton resources inside a union whose destructor
// deliberately does NOT destroy `resources`. This keeps the resources alive
// through program shutdown, so they remain usable from the destructors of
// other objects with static storage duration.
union ResourceInitHelper {
  struct {
    __new_delete_memory_resource_imp new_delete_res;
    __null_memory_resource_imp null_res;
  } resources;
  char dummy; // never the active member; present as the union's trivial alternative
  _LIBCPP_CONSTEXPR_SINCE_CXX14 ResourceInitHelper() : resources() {}
  ~ResourceInitHelper() {} // intentionally empty: do not destroy `resources`
};
80*4bdff4beSrobert
81*4bdff4beSrobert // Pretend we're inside a system header so the compiler doesn't flag the use of the init_priority
82*4bdff4beSrobert // attribute with a value that's reserved for the implementation (we're the implementation).
83*4bdff4beSrobert #include "memory_resource_init_helper.h"
84*4bdff4beSrobert
85*4bdff4beSrobert } // end namespace
86*4bdff4beSrobert
// Returns the singleton new/delete-backed resource (res_init is declared in
// memory_resource_init_helper.h, included above).
memory_resource* new_delete_resource() noexcept { return &res_init.resources.new_delete_res; }
88*4bdff4beSrobert
// Returns the singleton resource whose allocations always fail.
memory_resource* null_memory_resource() noexcept { return &res_init.resources.null_res; }
90*4bdff4beSrobert
91*4bdff4beSrobert // default_memory_resource()
92*4bdff4beSrobert
// Single point of truth for the process-wide default resource.
// Called with set=false it returns the current default; with set=true it
// installs `new_res` (or the new/delete resource when null) and returns the
// previously installed default. Three implementations, picked by
// configuration: atomic pointer, mutex-guarded pointer, or plain pointer.
static memory_resource* __default_memory_resource(bool set = false, memory_resource* new_res = nullptr) noexcept {
#ifndef _LIBCPP_HAS_NO_ATOMIC_HEADER
  // Preferred implementation: constant-initialized atomic pointer.
  static constinit atomic<memory_resource*> __res{&res_init.resources.new_delete_res};
  if (set) {
    new_res = new_res ? new_res : new_delete_resource();
    // TODO: Can a weaker ordering be used?
    return std::atomic_exchange_explicit(&__res, new_res, memory_order_acq_rel);
  } else {
    return std::atomic_load_explicit(&__res, memory_order_acquire);
  }
#elif !defined(_LIBCPP_HAS_NO_THREADS)
  // No <atomic> but threads exist: guard the pointer with a mutex.
  static constinit memory_resource* res = &res_init.resources.new_delete_res;
  static mutex res_lock;
  if (set) {
    new_res = new_res ? new_res : new_delete_resource();
    lock_guard<mutex> guard(res_lock);
    memory_resource* old_res = res;
    res = new_res;
    return old_res;
  } else {
    lock_guard<mutex> guard(res_lock);
    return res;
  }
#else
  // Single-threaded configuration: no synchronization needed.
  static constinit memory_resource* res = &res_init.resources.new_delete_res;
  if (set) {
    new_res = new_res ? new_res : new_delete_resource();
    memory_resource* old_res = res;
    res = new_res;
    return old_res;
  } else {
    return res;
  }
#endif
}
128*4bdff4beSrobert
// Reads the current default resource without modifying it.
memory_resource* get_default_resource() noexcept { return __default_memory_resource(); }
130*4bdff4beSrobert
// Installs __new_res as the default resource (nullptr selects the new/delete
// resource) and returns the previously installed default.
memory_resource* set_default_resource(memory_resource* __new_res) noexcept {
  return __default_memory_resource(true, __new_res);
}
134*4bdff4beSrobert
135*4bdff4beSrobert // 23.12.5, mem.res.pool
136*4bdff4beSrobert
// Rounds `count` up to the nearest multiple of `alignment`.
// `alignment` must be a power of two.
static size_t roundup(size_t count, size_t alignment) {
  return (count + alignment - 1) & ~(alignment - 1);
}
141*4bdff4beSrobert
// Footer placed at the tail of each ad-hoc (too large / over-aligned for any
// fixed pool) allocation; the footers form an intrusive singly linked list of
// the pool's live chunks.
struct unsynchronized_pool_resource::__adhoc_pool::__chunk_footer {
  __chunk_footer* __next_; // next chunk in the list
  char* __start_;          // start of this chunk's upstream allocation
  size_t __align_;         // alignment the chunk was allocated with
  // Total size originally requested from upstream: the user region up to this
  // footer, plus the footer itself.
  size_t __allocation_size() { return (reinterpret_cast<char*>(this) - __start_) + sizeof(*this); }
};
148*4bdff4beSrobert
__release_ptr(memory_resource * upstream)149*4bdff4beSrobert void unsynchronized_pool_resource::__adhoc_pool::__release_ptr(memory_resource* upstream) {
150*4bdff4beSrobert while (__first_ != nullptr) {
151*4bdff4beSrobert __chunk_footer* next = __first_->__next_;
152*4bdff4beSrobert upstream->deallocate(__first_->__start_, __first_->__allocation_size(), __first_->__align_);
153*4bdff4beSrobert __first_ = next;
154*4bdff4beSrobert }
155*4bdff4beSrobert }
156*4bdff4beSrobert
__do_allocate(memory_resource * upstream,size_t bytes,size_t align)157*4bdff4beSrobert void* unsynchronized_pool_resource::__adhoc_pool::__do_allocate(memory_resource* upstream, size_t bytes, size_t align) {
158*4bdff4beSrobert const size_t footer_size = sizeof(__chunk_footer);
159*4bdff4beSrobert const size_t footer_align = alignof(__chunk_footer);
160*4bdff4beSrobert
161*4bdff4beSrobert if (align < footer_align)
162*4bdff4beSrobert align = footer_align;
163*4bdff4beSrobert
164*4bdff4beSrobert size_t aligned_capacity = roundup(bytes, footer_align) + footer_size;
165*4bdff4beSrobert
166*4bdff4beSrobert void* result = upstream->allocate(aligned_capacity, align);
167*4bdff4beSrobert
168*4bdff4beSrobert __chunk_footer* h = (__chunk_footer*)((char*)result + aligned_capacity - footer_size);
169*4bdff4beSrobert h->__next_ = __first_;
170*4bdff4beSrobert h->__start_ = (char*)result;
171*4bdff4beSrobert h->__align_ = align;
172*4bdff4beSrobert __first_ = h;
173*4bdff4beSrobert return result;
174*4bdff4beSrobert }
175*4bdff4beSrobert
__do_deallocate(memory_resource * upstream,void * p,size_t bytes,size_t align)176*4bdff4beSrobert void unsynchronized_pool_resource::__adhoc_pool::__do_deallocate(
177*4bdff4beSrobert memory_resource* upstream, void* p, size_t bytes, size_t align) {
178*4bdff4beSrobert _LIBCPP_ASSERT(__first_ != nullptr, "deallocating a block that was not allocated with this allocator");
179*4bdff4beSrobert if (__first_->__start_ == p) {
180*4bdff4beSrobert __chunk_footer* next = __first_->__next_;
181*4bdff4beSrobert upstream->deallocate(p, __first_->__allocation_size(), __first_->__align_);
182*4bdff4beSrobert __first_ = next;
183*4bdff4beSrobert } else {
184*4bdff4beSrobert for (__chunk_footer* h = __first_; h->__next_ != nullptr; h = h->__next_) {
185*4bdff4beSrobert if (h->__next_->__start_ == p) {
186*4bdff4beSrobert __chunk_footer* next = h->__next_->__next_;
187*4bdff4beSrobert upstream->deallocate(p, h->__next_->__allocation_size(), h->__next_->__align_);
188*4bdff4beSrobert h->__next_ = next;
189*4bdff4beSrobert return;
190*4bdff4beSrobert }
191*4bdff4beSrobert }
192*4bdff4beSrobert _LIBCPP_ASSERT(false, "deallocating a block that was not allocated with this allocator");
193*4bdff4beSrobert }
194*4bdff4beSrobert }
195*4bdff4beSrobert
// Pool of equally-sized blocks. Blocks are carved out of chunks obtained from
// the upstream resource; freed blocks are threaded onto an intrusive free
// ("vacancy") list for reuse.
class unsynchronized_pool_resource::__fixed_pool {
  // Footer at the tail of each upstream chunk; chunks form a singly linked list.
  struct __chunk_footer {
    __chunk_footer* __next_;
    char* __start_;
    size_t __align_;
    // Total size originally requested from upstream for this chunk.
    size_t __allocation_size() { return (reinterpret_cast<char*>(this) - __start_) + sizeof(*this); }
  };

  // Header written into each *free* block, linking it into the vacancy list.
  struct __vacancy_header {
    __vacancy_header* __next_vacancy_;
  };

  __chunk_footer* __first_chunk_ = nullptr;     // chunks owned by this pool
  __vacancy_header* __first_vacancy_ = nullptr; // free list of reusable blocks

public:
  explicit __fixed_pool() = default;

  // Returns every chunk to `upstream` and drops the (now dangling) free list.
  void __release_ptr(memory_resource* upstream) {
    __first_vacancy_ = nullptr;
    while (__first_chunk_ != nullptr) {
      __chunk_footer* next = __first_chunk_->__next_;
      upstream->deallocate(__first_chunk_->__start_, __first_chunk_->__allocation_size(), __first_chunk_->__align_);
      __first_chunk_ = next;
    }
  }

  // Pops one block off the vacancy list, or returns nullptr if it is empty.
  void* __try_allocate_from_vacancies() {
    if (__first_vacancy_ != nullptr) {
      void* result = __first_vacancy_;
      __first_vacancy_ = __first_vacancy_->__next_vacancy_;
      return result;
    }
    return nullptr;
  }

  // Allocates a fresh chunk of `chunk_size` bytes (a multiple of `block_size`)
  // from `upstream`, returns the chunk's first block, and threads the
  // remaining blocks onto the vacancy list.
  void* __allocate_in_new_chunk(memory_resource* upstream, size_t block_size, size_t chunk_size) {
    _LIBCPP_ASSERT(chunk_size % block_size == 0, "");
    static_assert(__default_alignment >= alignof(std::max_align_t), "");
    static_assert(__default_alignment >= alignof(__chunk_footer), "");
    static_assert(__default_alignment >= alignof(__vacancy_header), "");

    const size_t footer_size = sizeof(__chunk_footer);
    const size_t footer_align = alignof(__chunk_footer);

    // Room for the blocks plus a suitably aligned footer at the end.
    size_t aligned_capacity = roundup(chunk_size, footer_align) + footer_size;

    void* result = upstream->allocate(aligned_capacity, __default_alignment);

    // Record the chunk by writing its footer and pushing it onto the list.
    __chunk_footer* h = (__chunk_footer*)((char*)result + aligned_capacity - footer_size);
    h->__next_ = __first_chunk_;
    h->__start_ = (char*)result;
    h->__align_ = __default_alignment;
    __first_chunk_ = h;

    // Blocks 1..N-1 become vacancies; block 0 is handed to the caller.
    if (chunk_size > block_size) {
      __vacancy_header* last_vh = this->__first_vacancy_;
      for (size_t i = block_size; i != chunk_size; i += block_size) {
        __vacancy_header* vh = (__vacancy_header*)((char*)result + i);
        vh->__next_vacancy_ = last_vh;
        last_vh = vh;
      }
      this->__first_vacancy_ = last_vh;
    }
    return result;
  }

  // Returns a block to the pool by pushing it onto the vacancy list.
  void __evacuate(void* p) {
    __vacancy_header* vh = (__vacancy_header*)(p);
    vh->__next_vacancy_ = __first_vacancy_;
    __first_vacancy_ = vh;
  }

  // Size of the most recently allocated chunk (0 if none); used by the caller
  // to grow successive chunk sizes.
  size_t __previous_chunk_size_in_bytes() const { return __first_chunk_ ? __first_chunk_->__allocation_size() : 0; }

  static const size_t __default_alignment = alignof(max_align_t);
};
273*4bdff4beSrobert
// Block size served by fixed pool `i`: 2^(i + __log2_smallest_block_size).
size_t unsynchronized_pool_resource::__pool_block_size(int i) const { return size_t(1) << __log2_pool_block_size(i); }
275*4bdff4beSrobert
// log2 of the block size served by fixed pool `i`.
int unsynchronized_pool_resource::__log2_pool_block_size(int i) const { return (i + __log2_smallest_block_size); }
277*4bdff4beSrobert
__pool_index(size_t bytes,size_t align) const278*4bdff4beSrobert int unsynchronized_pool_resource::__pool_index(size_t bytes, size_t align) const {
279*4bdff4beSrobert if (align > alignof(std::max_align_t) || bytes > (size_t(1) << __num_fixed_pools_))
280*4bdff4beSrobert return __num_fixed_pools_;
281*4bdff4beSrobert else {
282*4bdff4beSrobert int i = 0;
283*4bdff4beSrobert bytes = (bytes > align) ? bytes : align;
284*4bdff4beSrobert bytes -= 1;
285*4bdff4beSrobert bytes >>= __log2_smallest_block_size;
286*4bdff4beSrobert while (bytes != 0) {
287*4bdff4beSrobert bytes >>= 1;
288*4bdff4beSrobert i += 1;
289*4bdff4beSrobert }
290*4bdff4beSrobert return i;
291*4bdff4beSrobert }
292*4bdff4beSrobert }
293*4bdff4beSrobert
unsynchronized_pool_resource(const pool_options & opts,memory_resource * upstream)294*4bdff4beSrobert unsynchronized_pool_resource::unsynchronized_pool_resource(const pool_options& opts, memory_resource* upstream)
295*4bdff4beSrobert : __res_(upstream), __fixed_pools_(nullptr) {
296*4bdff4beSrobert size_t largest_block_size;
297*4bdff4beSrobert if (opts.largest_required_pool_block == 0)
298*4bdff4beSrobert largest_block_size = __default_largest_block_size;
299*4bdff4beSrobert else if (opts.largest_required_pool_block < __smallest_block_size)
300*4bdff4beSrobert largest_block_size = __smallest_block_size;
301*4bdff4beSrobert else if (opts.largest_required_pool_block > __max_largest_block_size)
302*4bdff4beSrobert largest_block_size = __max_largest_block_size;
303*4bdff4beSrobert else
304*4bdff4beSrobert largest_block_size = opts.largest_required_pool_block;
305*4bdff4beSrobert
306*4bdff4beSrobert if (opts.max_blocks_per_chunk == 0)
307*4bdff4beSrobert __options_max_blocks_per_chunk_ = __max_blocks_per_chunk;
308*4bdff4beSrobert else if (opts.max_blocks_per_chunk < __min_blocks_per_chunk)
309*4bdff4beSrobert __options_max_blocks_per_chunk_ = __min_blocks_per_chunk;
310*4bdff4beSrobert else if (opts.max_blocks_per_chunk > __max_blocks_per_chunk)
311*4bdff4beSrobert __options_max_blocks_per_chunk_ = __max_blocks_per_chunk;
312*4bdff4beSrobert else
313*4bdff4beSrobert __options_max_blocks_per_chunk_ = opts.max_blocks_per_chunk;
314*4bdff4beSrobert
315*4bdff4beSrobert __num_fixed_pools_ = 1;
316*4bdff4beSrobert size_t capacity = __smallest_block_size;
317*4bdff4beSrobert while (capacity < largest_block_size) {
318*4bdff4beSrobert capacity <<= 1;
319*4bdff4beSrobert __num_fixed_pools_ += 1;
320*4bdff4beSrobert }
321*4bdff4beSrobert }
322*4bdff4beSrobert
options() const323*4bdff4beSrobert pool_options unsynchronized_pool_resource::options() const {
324*4bdff4beSrobert pool_options p;
325*4bdff4beSrobert p.max_blocks_per_chunk = __options_max_blocks_per_chunk_;
326*4bdff4beSrobert p.largest_required_pool_block = __pool_block_size(__num_fixed_pools_ - 1);
327*4bdff4beSrobert return p;
328*4bdff4beSrobert }
329*4bdff4beSrobert
release()330*4bdff4beSrobert void unsynchronized_pool_resource::release() {
331*4bdff4beSrobert __adhoc_pool_.__release_ptr(__res_);
332*4bdff4beSrobert if (__fixed_pools_ != nullptr) {
333*4bdff4beSrobert const int n = __num_fixed_pools_;
334*4bdff4beSrobert for (int i = 0; i < n; ++i)
335*4bdff4beSrobert __fixed_pools_[i].__release_ptr(__res_);
336*4bdff4beSrobert __res_->deallocate(__fixed_pools_, __num_fixed_pools_ * sizeof(__fixed_pool), alignof(__fixed_pool));
337*4bdff4beSrobert __fixed_pools_ = nullptr;
338*4bdff4beSrobert }
339*4bdff4beSrobert }
340*4bdff4beSrobert
void* unsynchronized_pool_resource::do_allocate(size_t bytes, size_t align) {
  // A pointer to allocated storage (6.6.4.4.1) with a size of at least bytes.
  // The size and alignment of the allocated memory shall meet the requirements for
  // a class derived from memory_resource (23.12).
  // If the pool selected for a block of size bytes is unable to satisfy the memory request
  // from its own internal data structures, it will call upstream_resource()->allocate()
  // to obtain more memory. If bytes is larger than that which the largest pool can handle,
  // then memory will be allocated using upstream_resource()->allocate().

  int i = __pool_index(bytes, align);
  if (i == __num_fixed_pools_)
    // Too large or over-aligned for any fixed pool: serve ad hoc from upstream.
    return __adhoc_pool_.__do_allocate(__res_, bytes, align);
  else {
    // Lazily create and default-construct the fixed-pool array on first use.
    if (__fixed_pools_ == nullptr) {
      __fixed_pools_ =
          (__fixed_pool*)__res_->allocate(__num_fixed_pools_ * sizeof(__fixed_pool), alignof(__fixed_pool));
      __fixed_pool* first = __fixed_pools_;
      __fixed_pool* last = __fixed_pools_ + __num_fixed_pools_;
      for (__fixed_pool* pool = first; pool != last; ++pool)
        ::new ((void*)pool) __fixed_pool;
    }
    // Reuse a freed block if one is available.
    void* result = __fixed_pools_[i].__try_allocate_from_vacancies();
    if (result == nullptr) {
      // Free list exhausted: grow the pool with a new chunk. Chunk sizes grow
      // by ~25% per chunk, clamped by the configured/implementation maxima.
      auto min = [](size_t a, size_t b) { return a < b ? a : b; };
      auto max = [](size_t a, size_t b) { return a < b ? b : a; };

      size_t prev_chunk_size_in_bytes = __fixed_pools_[i].__previous_chunk_size_in_bytes();
      size_t prev_chunk_size_in_blocks = prev_chunk_size_in_bytes >> __log2_pool_block_size(i);

      size_t chunk_size_in_blocks;

      if (prev_chunk_size_in_blocks == 0) {
        // First chunk for this pool: start at the larger of the byte- and
        // block-count minimums.
        size_t min_blocks_per_chunk = max(__min_bytes_per_chunk >> __log2_pool_block_size(i), __min_blocks_per_chunk);
        chunk_size_in_blocks = min_blocks_per_chunk;
      } else {
        static_assert(__max_bytes_per_chunk <= SIZE_MAX - (__max_bytes_per_chunk / 4), "unsigned overflow is possible");
        chunk_size_in_blocks = prev_chunk_size_in_blocks + (prev_chunk_size_in_blocks / 4);
      }

      // Clamp the growth to every applicable maximum.
      size_t max_blocks_per_chunk =
          min((__max_bytes_per_chunk >> __log2_pool_block_size(i)),
              min(__max_blocks_per_chunk, __options_max_blocks_per_chunk_));
      if (chunk_size_in_blocks > max_blocks_per_chunk)
        chunk_size_in_blocks = max_blocks_per_chunk;

      size_t block_size = __pool_block_size(i);

      size_t chunk_size_in_bytes = (chunk_size_in_blocks << __log2_pool_block_size(i));
      result = __fixed_pools_[i].__allocate_in_new_chunk(__res_, block_size, chunk_size_in_bytes);
    }
    return result;
  }
}
394*4bdff4beSrobert
do_deallocate(void * p,size_t bytes,size_t align)395*4bdff4beSrobert void unsynchronized_pool_resource::do_deallocate(void* p, size_t bytes, size_t align) {
396*4bdff4beSrobert // Returns the memory at p to the pool. It is unspecified if,
397*4bdff4beSrobert // or under what circumstances, this operation will result in
398*4bdff4beSrobert // a call to upstream_resource()->deallocate().
399*4bdff4beSrobert
400*4bdff4beSrobert int i = __pool_index(bytes, align);
401*4bdff4beSrobert if (i == __num_fixed_pools_)
402*4bdff4beSrobert return __adhoc_pool_.__do_deallocate(__res_, p, bytes, align);
403*4bdff4beSrobert else {
404*4bdff4beSrobert _LIBCPP_ASSERT(__fixed_pools_ != nullptr, "deallocating a block that was not allocated with this allocator");
405*4bdff4beSrobert __fixed_pools_[i].__evacuate(p);
406*4bdff4beSrobert }
407*4bdff4beSrobert }
408*4bdff4beSrobert
// Identity comparison: a synchronized_pool_resource is equal only to itself.
bool synchronized_pool_resource::do_is_equal(const memory_resource& other) const noexcept { return &other == this; }
410*4bdff4beSrobert
411*4bdff4beSrobert // 23.12.6, mem.res.monotonic.buffer
412*4bdff4beSrobert
// Carves `size` bytes off the END of the buffer [ptr - space, ptr), rounding
// the result down to `align` (a power of two). On success, updates `ptr` and
// `space` to describe the remaining front part and returns the carved
// pointer; returns nullptr (leaving ptr/space untouched) when the request
// cannot fit.
static void* align_down(size_t align, size_t size, void*& ptr, size_t& space) {
  if (size > space)
    return nullptr;

  char* const top = static_cast<char*>(ptr);
  char* const candidate = reinterpret_cast<char*>(reinterpret_cast<uintptr_t>(top - size) & ~(align - 1));

  // After aligning down, the result must still lie within the buffer.
  if (candidate < top - space)
    return nullptr;

  space -= top - candidate;
  ptr = candidate;
  return ptr;
}
428*4bdff4beSrobert
__try_allocate_from_chunk(size_t bytes,size_t align)429*4bdff4beSrobert void* monotonic_buffer_resource::__initial_descriptor::__try_allocate_from_chunk(size_t bytes, size_t align) {
430*4bdff4beSrobert if (!__cur_)
431*4bdff4beSrobert return nullptr;
432*4bdff4beSrobert void* new_ptr = static_cast<void*>(__cur_);
433*4bdff4beSrobert size_t new_capacity = (__cur_ - __start_);
434*4bdff4beSrobert void* aligned_ptr = align_down(align, bytes, new_ptr, new_capacity);
435*4bdff4beSrobert if (aligned_ptr != nullptr)
436*4bdff4beSrobert __cur_ = static_cast<char*>(new_ptr);
437*4bdff4beSrobert return aligned_ptr;
438*4bdff4beSrobert }
439*4bdff4beSrobert
__try_allocate_from_chunk(size_t bytes,size_t align)440*4bdff4beSrobert void* monotonic_buffer_resource::__chunk_footer::__try_allocate_from_chunk(size_t bytes, size_t align) {
441*4bdff4beSrobert void* new_ptr = static_cast<void*>(__cur_);
442*4bdff4beSrobert size_t new_capacity = (__cur_ - __start_);
443*4bdff4beSrobert void* aligned_ptr = align_down(align, bytes, new_ptr, new_capacity);
444*4bdff4beSrobert if (aligned_ptr != nullptr)
445*4bdff4beSrobert __cur_ = static_cast<char*>(new_ptr);
446*4bdff4beSrobert return aligned_ptr;
447*4bdff4beSrobert }
448*4bdff4beSrobert
// Serves a request from the initial buffer or the newest chunk; when neither
// fits, allocates a new upstream chunk at least double the previous
// allocation and serves from that.
void* monotonic_buffer_resource::do_allocate(size_t bytes, size_t align) {
  const size_t footer_size = sizeof(__chunk_footer);
  const size_t footer_align = alignof(__chunk_footer);

  // Size of the most recent allocation — or, before any chunk exists, of the
  // initial buffer (or the requested initial size) — used for geometric growth.
  auto previous_allocation_size = [&]() {
    if (__chunks_ != nullptr)
      return __chunks_->__allocation_size();

    size_t newsize = (__initial_.__start_ != nullptr) ? (__initial_.__end_ - __initial_.__start_) : __initial_.__size_;

    return roundup(newsize, footer_align) + footer_size;
  };

  // Try the initial buffer first, then the newest upstream chunk.
  if (void* result = __initial_.__try_allocate_from_chunk(bytes, align))
    return result;
  if (__chunks_ != nullptr) {
    if (void* result = __chunks_->__try_allocate_from_chunk(bytes, align))
      return result;
  }

  // Allocate a brand-new chunk.

  // The chunk's alignment must also satisfy the footer's alignment.
  if (align < footer_align)
    align = footer_align;

  size_t aligned_capacity = roundup(bytes, footer_align) + footer_size;
  size_t previous_capacity = previous_allocation_size();

  // Grow geometrically: at least double the previous usable capacity, so the
  // number of upstream calls stays logarithmic in total bytes allocated.
  if (aligned_capacity <= previous_capacity) {
    size_t newsize = 2 * (previous_capacity - footer_size);
    aligned_capacity = roundup(newsize, footer_align) + footer_size;
  }

  // Place the footer at the chunk's tail and link it at the head of the list.
  char* start = (char*)__res_->allocate(aligned_capacity, align);
  auto end = start + aligned_capacity - footer_size;
  __chunk_footer* footer = (__chunk_footer*)(end);
  footer->__next_ = __chunks_;
  footer->__start_ = start;
  footer->__cur_ = end;
  footer->__align_ = align;
  __chunks_ = footer;

  // The new chunk was sized to fit this request, so this succeeds.
  return __chunks_->__try_allocate_from_chunk(bytes, align);
}
493*4bdff4beSrobert
494*4bdff4beSrobert } // namespace pmr
495*4bdff4beSrobert
496*4bdff4beSrobert _LIBCPP_END_NAMESPACE_STD
497