// <memory_resource> implementation -*- C++ -*-

// Copyright (C) 2018-2020 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

#include <memory_resource>
#include <algorithm>            // lower_bound, rotate
#include <atomic>
#include <bit>                  // has_single_bit, bit_ceil, bit_width
#include <new>
#if ATOMIC_POINTER_LOCK_FREE != 2
# include <bits/std_mutex.h>    // std::mutex, std::lock_guard
# include <bits/move.h>         // std::exchange
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
namespace pmr
{
  // This was defined inline in 9.1 and 9.2 so code compiled by those
  // versions will not use this symbol.
  memory_resource::~memory_resource() = default;

  namespace
  {
    class newdel_res_t final : public memory_resource
    {
      void*
      do_allocate(size_t __bytes, size_t __alignment) override
      { return ::operator new(__bytes, std::align_val_t(__alignment)); }

      void
      do_deallocate(void* __p, size_t __bytes, size_t __alignment) noexcept
      override
      { ::operator delete(__p, __bytes, std::align_val_t(__alignment)); }

      bool
      do_is_equal(const memory_resource& __other) const noexcept override
      { return &__other == this; }
    };

    class null_res_t final : public memory_resource
    {
      void*
      do_allocate(size_t, size_t) override
      { std::__throw_bad_alloc(); }

      void
      do_deallocate(void*, size_t, size_t) noexcept override
      { }

      bool
      do_is_equal(const memory_resource& __other) const noexcept override
      { return &__other == this; }
    };

    template<typename T>
      struct constant_init
      {
        union {
          unsigned char unused;
          T obj;
        };
        constexpr constant_init() : obj() { }

        template<typename U>
          explicit constexpr constant_init(U arg) : obj(arg) { }

        ~constant_init() { /* do nothing, union member is not destroyed */ }
      };

    __constinit constant_init<newdel_res_t> newdel_res{};
    __constinit constant_init<null_res_t> null_res{};
#if ATOMIC_POINTER_LOCK_FREE == 2
    using atomic_mem_res = atomic<memory_resource*>;
# define _GLIBCXX_ATOMIC_MEM_RES_CAN_BE_CONSTANT_INITIALIZED
#elif defined(_GLIBCXX_HAS_GTHREADS)
    // Can't use pointer-width atomics, define a type using a mutex instead:
    struct atomic_mem_res
    {
# ifdef __GTHREAD_MUTEX_INIT
#  define _GLIBCXX_ATOMIC_MEM_RES_CAN_BE_CONSTANT_INITIALIZED
      // std::mutex has constexpr constructor
      constexpr
# endif
      atomic_mem_res(memory_resource* r) : val(r) { }

      mutex mx;
      memory_resource* val;

      memory_resource* load()
      {
        lock_guard<mutex> lock(mx);
        return val;
      }

      memory_resource* exchange(memory_resource* r)
      {
        lock_guard<mutex> lock(mx);
        return std::exchange(val, r);
      }
    };
#else
# define _GLIBCXX_ATOMIC_MEM_RES_CAN_BE_CONSTANT_INITIALIZED
    // Single-threaded, no need for synchronization
    struct atomic_mem_res
    {
      constexpr
      atomic_mem_res(memory_resource* r) : val(r) { }

      memory_resource* val;

      memory_resource* load() const
      {
        return val;
      }

      memory_resource* exchange(memory_resource* r)
      {
        return std::exchange(val, r);
      }
    };
#endif // ATOMIC_POINTER_LOCK_FREE == 2

#ifdef _GLIBCXX_ATOMIC_MEM_RES_CAN_BE_CONSTANT_INITIALIZED
    __constinit constant_init<atomic_mem_res> default_res{&newdel_res.obj};
#else
# include "default_resource.h"
#endif
  } // namespace

  memory_resource*
  new_delete_resource() noexcept
  { return &newdel_res.obj; }

  memory_resource*
  null_memory_resource() noexcept
  { return &null_res.obj; }

  memory_resource*
  set_default_resource(memory_resource* r) noexcept
  {
    if (r == nullptr)
      r = new_delete_resource();
    return default_res.obj.exchange(r);
  }

  memory_resource*
  get_default_resource() noexcept
  { return default_res.obj.load(); }

  // Member functions for std::pmr::monotonic_buffer_resource

  // This was defined inline in 9.1 and 9.2 so code compiled by those
  // versions will not use this symbol.
  monotonic_buffer_resource::~monotonic_buffer_resource() { release(); }

  // Memory allocated by the upstream resource is managed in a linked list
  // of _Chunk objects. A _Chunk object recording the size and alignment of
  // the allocated block and a pointer to the previous chunk is placed
  // at the end of the block.
  class monotonic_buffer_resource::_Chunk
  {
  public:
    // Return the address and size of a block of memory allocated from __r,
    // of at least __size bytes and aligned to __align.
    // Add a new _Chunk to the front of the linked list at __head.
    static pair<void*, size_t>
    allocate(memory_resource* __r, size_t __size, size_t __align,
             _Chunk*& __head)
    {
      __size = std::__bit_ceil(__size + sizeof(_Chunk));

      if constexpr (alignof(_Chunk) > 1)
        {
          // PR libstdc++/90046
          // For targets like epiphany-elf where alignof(_Chunk) != 1
          // ensure that the last sizeof(_Chunk) bytes in the buffer
          // are suitably-aligned for a _Chunk.
          // This should be unnecessary, because the caller already
          // passes in max(__align, alignof(max_align_t)).
          if (__align < alignof(_Chunk))
            __align = alignof(_Chunk);
        }

      void* __p = __r->allocate(__size, __align);

      // Add a chunk defined by (__p, __size, __align) to linked list __head.
      void* const __back = (char*)__p + __size - sizeof(_Chunk);
      __head = ::new(__back) _Chunk(__size, __align, __head);
      return { __p, __size - sizeof(_Chunk) };
    }

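    // Illustrative sketch (not part of the original source) of the block
    // returned by allocate() above, after __size is rounded up to a power
    // of two large enough to also hold the footer:
    //
    //   __p                                        __back
    //   |                                          |
    //   v                                          v
    //   +------------------------------------------+---------------------+
    //   | usable space returned to the caller      | _Chunk footer       |
    //   | (__size - sizeof(_Chunk) bytes)          | (size, align, next) |
    //   +------------------------------------------+---------------------+
    //
    // The footer links the block into the list headed by __head, so that
    // release() below can walk the list and hand each block back upstream.
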
    // Return every chunk in linked list __head to resource __r.
    static void
    release(_Chunk*& __head, memory_resource* __r) noexcept
    {
      _Chunk* __next = __head;
      __head = nullptr;
      while (__next)
        {
          _Chunk* __ch = __next;
          __builtin_memcpy(&__next, __ch->_M_next, sizeof(_Chunk*));

          __glibcxx_assert(__ch->_M_canary != 0);
          __glibcxx_assert(__ch->_M_canary == (__ch->_M_size|__ch->_M_align));

          if (__ch->_M_canary != (__ch->_M_size | __ch->_M_align))
            return; // buffer overflow detected!

          size_t __size = (size_t)1 << __ch->_M_size;
          size_t __align = (size_t)1 << __ch->_M_align;
          void* __start = (char*)(__ch + 1) - __size;
          __r->deallocate(__start, __size, __align);
        }
    }

  private:
    _Chunk(size_t __size, size_t __align, _Chunk* __next) noexcept
    : _M_size(std::__bit_width(__size) - 1),
      _M_align(std::__bit_width(__align) - 1)
    {
      __builtin_memcpy(_M_next, &__next, sizeof(__next));
      _M_canary = _M_size | _M_align;
    }

    unsigned char _M_canary;
    unsigned char _M_size;
    unsigned char _M_align;
    unsigned char _M_next[sizeof(_Chunk*)];
  };

  void
  monotonic_buffer_resource::_M_new_buffer(size_t bytes, size_t alignment)
  {
    const size_t n = std::max(bytes, _M_next_bufsiz);
    const size_t m = std::max(alignment, alignof(std::max_align_t));
    auto [p, size] = _Chunk::allocate(_M_upstream, n, m, _M_head);
    _M_current_buf = p;
    _M_avail = size;
    _M_next_bufsiz *= _S_growth_factor;
  }

  void
  monotonic_buffer_resource::_M_release_buffers() noexcept
  {
    _Chunk::release(_M_head, _M_upstream);
  }

  // Helper types for synchronized_pool_resource & unsynchronized_pool_resource

  namespace {

  // Simple bitset with runtime size.
  // Tracks which blocks in a pool chunk are used/unused.
  struct bitset
  {
    using word = uint64_t;
    using size_type // unsigned integer type with no more than 32 bits
      = conditional_t<numeric_limits<size_t>::digits <= 32, size_t, uint32_t>;

    static constexpr unsigned bits_per_word = numeric_limits<word>::digits;

    // The bitset does not own p
    bitset(void* p, size_type num_blocks)
    : _M_words(static_cast<word*>(p)), _M_size(num_blocks),
      _M_next_word(0)
    {
      const size_type last_word = num_blocks / bits_per_word;
      __builtin_memset(_M_words, 0, last_word * sizeof(*_M_words));
      // Set bits beyond _M_size, so they are not treated as free blocks:
      if (const size_type extra_bits = num_blocks % bits_per_word)
        _M_words[last_word] = word(-1) << extra_bits;
      __glibcxx_assert( empty() );
      __glibcxx_assert( free() == num_blocks );
    }

    bitset() = default;
    ~bitset() = default;

    // Number of blocks
    size_type size() const noexcept { return _M_size; }

    // Number of free blocks (unset bits)
    size_type free() const noexcept
    {
      size_type n = 0;
      for (size_type i = _M_next_word; i < nwords(); ++i)
        n += (bits_per_word - std::__popcount(_M_words[i]));
      return n;
    }

    // True if there are no free blocks (all bits are set)
    bool full() const noexcept
    {
      if (_M_next_word >= nwords())
        return true;
      // For a bitset with size() > (max_blocks_per_chunk() - 64) we will
      // have nwords() == (max_word_index() + 1) and so _M_next_word will
      // never be equal to nwords().
      // In that case, check if the last word is full:
      if (_M_next_word == max_word_index())
        return _M_words[_M_next_word] == word(-1);
      return false;
    }

    // True if size() != 0 and all blocks are free (no bits are set).
    bool empty() const noexcept
    {
      if (nwords() == 0)
        return false;
      if (_M_next_word != 0)
        return false;
      for (size_type i = 0; i < nwords() - 1; ++i)
        if (_M_words[i] != 0)
          return false;
      word last = _M_words[nwords() - 1];
      if (const size_type extra_bits = size() % bits_per_word)
        last <<= (bits_per_word - extra_bits);
      return last == 0;
    }

    void reset() noexcept
    {
      _M_words = nullptr;
      _M_size = _M_next_word = 0;
    }

    bool operator[](size_type n) const noexcept
    {
      __glibcxx_assert( n < _M_size );
      const size_type wd = n / bits_per_word;
      const word bit = word(1) << (n % bits_per_word);
      return _M_words[wd] & bit;
    }

    size_type get_first_unset() noexcept
    {
      const size_type wd = _M_next_word;
      if (wd < nwords())
        {
          const size_type n = std::__countr_one(_M_words[wd]);
          if (n < bits_per_word)
            {
              const word bit = word(1) << n;
              _M_words[wd] |= bit;
              update_next_word();
              return (wd * bits_per_word) + n;
            }
        }
      return size_type(-1);
    }

    void set(size_type n) noexcept
    {
      __glibcxx_assert( n < _M_size );
      const size_type wd = n / bits_per_word;
      const word bit = word(1) << (n % bits_per_word);
      _M_words[wd] |= bit;
      if (wd == _M_next_word)
        update_next_word();
    }

    void clear(size_type n) noexcept
    {
      __glibcxx_assert( n < _M_size );
      const size_type wd = n / bits_per_word;
      const word bit = word(1) << (n % bits_per_word);
      _M_words[wd] &= ~bit;
      if (wd < _M_next_word)
        _M_next_word = wd;
    }

    // Update _M_next_word to refer to the next word with an unset bit.
    // The size of the _M_next_word bit-field means it cannot represent
    // the maximum possible nwords() value. To avoid wraparound to zero
    // this function saturates _M_next_word at max_word_index().
    void update_next_word() noexcept
    {
      size_type next = _M_next_word;
      while (_M_words[next] == word(-1) && ++next < nwords())
        { }
      _M_next_word = std::min(next, max_word_index());
    }

    void swap(bitset& b) noexcept
    {
      std::swap(_M_words, b._M_words);
      size_type tmp = _M_size;
      _M_size = b._M_size;
      b._M_size = tmp;
      tmp = _M_next_word;
      _M_next_word = b._M_next_word;
      b._M_next_word = tmp;
    }

    size_type nwords() const noexcept
    { return (_M_size + bits_per_word - 1) / bits_per_word; }

    // Maximum value that can be stored in bitset::_M_size member (approx 500k)
    static constexpr size_type max_blocks_per_chunk() noexcept
    { return (size_type(1) << _S_size_digits) - 1; }

    // Maximum value that can be stored in bitset::_M_next_word member (8191).
    static constexpr size_type max_word_index() noexcept
    { return (max_blocks_per_chunk() + bits_per_word - 1) / bits_per_word; }

    word* data() const noexcept { return _M_words; }

  private:
    static constexpr unsigned _S_size_digits
      = (numeric_limits<size_type>::digits
          + std::__bit_width(bits_per_word) - 1) / 2;

    word* _M_words = nullptr;
    // Number of blocks represented by the bitset:
    size_type _M_size : _S_size_digits;
    // Index of the first word with unset bits:
    size_type _M_next_word : numeric_limits<size_type>::digits - _S_size_digits;
  };

  // A "chunk" belonging to a pool.
  // A chunk contains many blocks of the same size.
  // Derived from bitset to reuse its tail-padding.
  struct chunk : bitset
  {
    chunk() = default;

    // p points to the start of a chunk of size bytes in length.
    // The chunk has space for n blocks, followed by a bitset of size n
    // that begins at address words.
    // This object does not own p or words, the caller will free it.
    chunk(void* p, uint32_t bytes, void* words, size_t n)
    : bitset(words, n),
      _M_bytes(bytes),
      _M_p(static_cast<std::byte*>(p))
    { __glibcxx_assert(bytes <= chunk::max_bytes_per_chunk()); }

    chunk(chunk&& c) noexcept
    : bitset(std::move(c)), _M_bytes(c._M_bytes), _M_p(c._M_p)
    {
      c._M_bytes = 0;
      c._M_p = nullptr;
      c.reset();
    }

    chunk& operator=(chunk&& c) noexcept
    {
      swap(c);
      return *this;
    }

    // Allocated size of chunk:
    uint32_t _M_bytes = 0;
    // Start of allocated chunk:
    std::byte* _M_p = nullptr;

    // True if there are no free blocks in this chunk
    using bitset::full;
    // Number of blocks in this chunk
    using bitset::size;

    static constexpr uint32_t max_bytes_per_chunk() noexcept
    { return numeric_limits<decltype(_M_bytes)>::max(); }

    // Determine if block with address p and size block_size
    // is contained within this chunk.
    bool owns(void* p, size_t block_size)
    {
      std::less_equal<uintptr_t> less_equal;
      return less_equal(reinterpret_cast<uintptr_t>(_M_p),
                        reinterpret_cast<uintptr_t>(p))
        && less_equal(reinterpret_cast<uintptr_t>(p) + block_size,
                      reinterpret_cast<uintptr_t>(bitset::data()));
    }

    // Allocate next available block of block_size bytes from this chunk.
    void* reserve(size_t block_size) noexcept
    {
      const size_type n = get_first_unset();
      if (n == size_type(-1))
        return nullptr;
      return _M_p + (n * block_size);
    }

    // Deallocate a single block of block_size bytes
    void release(void* vp, size_t block_size)
    {
      __glibcxx_assert( owns(vp, block_size) );
      const size_t offset = static_cast<std::byte*>(vp) - _M_p;
      // Pointer is correctly aligned for a block in this chunk:
      __glibcxx_assert( (offset % block_size) == 0 );
      // Block has been allocated:
      __glibcxx_assert( (*this)[offset / block_size] == true );
      bitset::clear(offset / block_size);
    }

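    // Worked example for release() above (illustrative, not in the original
    // source): with block_size == 64 and vp == _M_p + 192, the offset is 192,
    // so the block index is 192 / 64 == 3 and bit 3 is cleared in the bitset.
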
    // Deallocate a single block if it belongs to this chunk.
    bool try_release(void* p, size_t block_size)
    {
      if (!owns(p, block_size))
        return false;
      release(p, block_size);
      return true;
    }

    void swap(chunk& c) noexcept
    {
      std::swap(_M_bytes, c._M_bytes);
      std::swap(_M_p, c._M_p);
      bitset::swap(c);
    }

    bool operator<(const chunk& c) const noexcept
    { return std::less<const void*>{}(_M_p, c._M_p); }

    friend void swap(chunk& l, chunk& r) { l.swap(r); }

    friend bool operator<(const void* p, const chunk& c) noexcept
    { return std::less<const void*>{}(p, c._M_p); }
  };

  // For 64-bit pointers this is the size of three pointers i.e. 24 bytes.
  // For 32-bit and 20-bit pointers it's four pointers (16 bytes).
  // For 16-bit pointers it's five pointers (10 bytes).
  // TODO pad 64-bit to 4*sizeof(void*) to avoid splitting across cache lines?
  static_assert(sizeof(chunk)
      == sizeof(bitset::size_type) + sizeof(uint32_t) + 2 * sizeof(void*));

  // An oversized allocation that doesn't fit in a pool.
  struct big_block
  {
    // Alignment must be a power-of-two so we only need to use enough bits
    // to store the power, not the actual value:
    static constexpr unsigned _S_alignbits
      = std::__bit_width((unsigned)numeric_limits<size_t>::digits - 1);
    // Use the remaining bits to store the size:
    static constexpr unsigned _S_sizebits
      = numeric_limits<size_t>::digits - _S_alignbits;
    // The maximum value that can be stored in the _M_size bit-field:
    static constexpr size_t all_ones = size_t(-1) >> _S_alignbits;
    // The minimum size of a big block (smaller sizes will be rounded up).
    static constexpr size_t min = 1u << _S_alignbits;

    big_block(size_t bytes, size_t alignment)
    : _M_size(alloc_size(bytes) >> _S_alignbits),
      _M_align_exp(std::__bit_width(alignment) - 1u)
    { }

    void* pointer = nullptr;
    size_t _M_size : numeric_limits<size_t>::digits - _S_alignbits;
    size_t _M_align_exp : _S_alignbits;

    size_t size() const noexcept
    {
      // If all bits are set in _M_size it means the maximum possible size:
      if (__builtin_expect(_M_size == (size_t(-1) >> _S_alignbits), false))
        return (size_t)-1;
      else
        return _M_size << _S_alignbits;
    }

    size_t align() const noexcept { return size_t(1) << _M_align_exp; }

    // Calculate size to be allocated instead of requested number of bytes.
    // The requested value will be rounded up to a multiple of big_block::min,
    // so the low _S_alignbits bits are all zero and don't need to be stored.
    static constexpr size_t alloc_size(size_t bytes) noexcept
    {
      const size_t s = bytes + min - 1u;
      if (__builtin_expect(s < bytes, false))
        return size_t(-1); // addition wrapped past zero, return max value
      else
        return s & ~(min - 1u);
    }

    friend bool operator<(void* p, const big_block& b) noexcept
    { return less<void*>{}(p, b.pointer); }

    friend bool operator<(const big_block& b, void* p) noexcept
    { return less<void*>{}(b.pointer, p); }
  };

  static_assert(sizeof(big_block) == (2 * sizeof(void*)));

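  // Concrete numbers for big_block on a typical 64-bit target (illustrative,
  // not in the original source): numeric_limits<size_t>::digits is 64, so
  // _S_alignbits == __bit_width(63) == 6 and min == 64; sizes are stored
  // right-shifted by 6, i.e. in units of 64 bytes. For example
  // alloc_size(100) rounds up to 128, which is what size() later reports
  // for that block.
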
  } // namespace

  // A pool that serves blocks of a particular size.
  // Each pool manages a number of chunks.
  // When a pool is full it is replenished by allocating another chunk.
  struct __pool_resource::_Pool
  {
    // Smallest supported block size
    static constexpr unsigned _S_min_block
      = std::max(sizeof(void*), alignof(bitset::word));

    _Pool(size_t __block_size, size_t __blocks_per_chunk)
    : _M_chunks(),
      _M_block_sz(__block_size),
      _M_blocks_per_chunk(__blocks_per_chunk)
    { }

    // Must call release(r) before destruction!
    ~_Pool() { __glibcxx_assert(_M_chunks.empty()); }

    _Pool(_Pool&&) noexcept = default;
    _Pool& operator=(_Pool&&) noexcept = default;

    // Size of blocks in this pool
    size_t block_size() const noexcept
    { return _M_block_sz; }

    // Allocate a block if the pool is not full, otherwise return null.
    void* try_allocate() noexcept
    {
      const size_t blocksz = block_size();
      if (!_M_chunks.empty())
        {
          auto& last = _M_chunks.back();
          if (void* p = last.reserve(blocksz))
            return p;
          // TODO last is full, so move another chunk to the back instead?
          for (auto it = _M_chunks.begin(); it != &last; ++it)
            if (void* p = it->reserve(blocksz))
              return p;
        }
      return nullptr;
    }

    // Allocate a block from the pool, replenishing from upstream if needed.
    void* allocate(memory_resource* r, const pool_options& opts)
    {
      if (void* p = try_allocate())
        return p;
      replenish(r, opts);
      return _M_chunks.back().reserve(block_size());
    }

    // Return a block to the pool.
    bool deallocate(memory_resource*, void* p)
    {
      const size_t blocksz = block_size();
      if (__builtin_expect(!_M_chunks.empty(), true))
        {
          auto& last = _M_chunks.back();
          if (last.try_release(p, blocksz))
            return true;
          auto it = std::upper_bound(_M_chunks.begin(), &last, p);
          if (it != _M_chunks.begin())
            {
              it--;
              if (it->try_release(p, blocksz))
                // If chunk is empty could return to upstream, but we don't
                // currently do that. Pools only increase in size.
                return true;
            }
        }
      return false;
    }

    void replenish(memory_resource* __r, const pool_options& __opts)
    {
      using word = chunk::word;
      const size_t __blocks = _M_blocks_per_chunk;
      const auto __bits = chunk::bits_per_word;
      const size_t __words = (__blocks + __bits - 1) / __bits;
      const size_t __block_size = block_size();
      size_t __bytes = __blocks * __block_size + __words * sizeof(word);
      size_t __alignment = std::__bit_ceil(__block_size);
      void* __p = __r->allocate(__bytes, __alignment);
      __try
        {
          size_t __n = __blocks * __block_size;
          void* __pwords = static_cast<char*>(__p) + __n;
          _M_chunks.insert(chunk(__p, __bytes, __pwords, __blocks), __r);
        }
      __catch (...)
        {
          __r->deallocate(__p, __bytes, __alignment);
        }
      if (_M_blocks_per_chunk < __opts.max_blocks_per_chunk)
        {
          const size_t max_blocks
            = (chunk::max_bytes_per_chunk() - sizeof(word))
            / (__block_size + 0.125);
          _M_blocks_per_chunk = std::min({
              max_blocks,
              __opts.max_blocks_per_chunk,
              (size_t)_M_blocks_per_chunk * 2
          });
        }
    }

    void release(memory_resource* __r)
    {
      const size_t __alignment = std::__bit_ceil(block_size());
      for (auto& __c : _M_chunks)
        if (__c._M_p)
          __r->deallocate(__c._M_p, __c._M_bytes, __alignment);
      _M_chunks.clear(__r);
    }

    // A "resourceless vector" instead of pmr::vector, to save space.
    // All resize operations need to be passed a memory resource, which
    // obviously needs to be the same one every time.
    // Chunks are kept sorted by address of their first block, except for
    // the most recently-allocated Chunk which is at the end of the vector.
    struct vector
    {
      using value_type = chunk;
      using size_type = unsigned;
      using iterator = value_type*;

      // A vector owns its data pointer but not memory held by its elements.
      chunk* data = nullptr;
      size_type size = 0;
      size_type capacity = 0;

      vector() = default;

      vector(size_type __n, memory_resource* __r)
      : data(polymorphic_allocator<value_type>(__r).allocate(__n)),
        capacity(__n)
      { }

      // Must call clear(r) before destruction!
      ~vector() { __glibcxx_assert(data == nullptr); }

      vector(vector&& __rval) noexcept
      : data(__rval.data), size(__rval.size), capacity(__rval.capacity)
      {
        __rval.data = nullptr;
        __rval.capacity = __rval.size = 0;
      }

      vector& operator=(vector&& __rval) noexcept
      {
        __glibcxx_assert(data == nullptr);
        data = __rval.data;
        size = __rval.size;
        capacity = __rval.capacity;
        __rval.data = nullptr;
        __rval.capacity = __rval.size = 0;
        return *this;
      }

      // void resize(size_type __n, memory_resource* __r);
      // void reserve(size_type __n, memory_resource* __r);

      void clear(memory_resource* __r)
      {
        if (!data)
          return;
        // Chunks must be individually freed before clearing the vector.
        std::destroy(begin(), end());
        polymorphic_allocator<value_type>(__r).deallocate(data, capacity);
        data = nullptr;
        capacity = size = 0;
      }

      // Sort existing elements then insert new one at the end.
      iterator insert(chunk&& c, memory_resource* r)
      {
        if (size < capacity)
          {
            if (size > 1)
              {
                auto mid = end() - 1;
                std::rotate(std::lower_bound(begin(), mid, *mid), mid, end());
              }
          }
        else if (size > 0)
          {
            polymorphic_allocator<value_type> __alloc(r);
            auto __mid = std::lower_bound(begin(), end() - 1, back());
            auto __p = __alloc.allocate(capacity * 1.5);
            // move [begin,__mid) to new storage
            auto __p2 = std::move(begin(), __mid, __p);
            // move end-1 to new storage
            *__p2 = std::move(back());
            // move [__mid,end-1) to new storage
            std::move(__mid, end() - 1, ++__p2);
            std::destroy(begin(), end());
            __alloc.deallocate(data, capacity);
            data = __p;
            capacity *= 1.5;
          }
        else
          {
            polymorphic_allocator<value_type> __alloc(r);
            data = __alloc.allocate(capacity = 8);
          }
        auto back = ::new (data + size) chunk(std::move(c));
        __glibcxx_assert(std::is_sorted(begin(), back));
        ++size;
        return back;
      }

      iterator begin() const { return data; }
      iterator end() const { return data + size; }

      bool empty() const noexcept { return size == 0; }

      value_type& back() { return data[size - 1]; }
    };

    vector _M_chunks;
    unsigned _M_block_sz;         // size of blocks allocated from this pool
    unsigned _M_blocks_per_chunk; // number of blocks to allocate next
  };

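  // Worked example for _Pool::replenish() above (illustrative, not in the
  // original source): with block_size == 64 and _M_blocks_per_chunk == 16,
  // one bitset word is enough (16 <= 64 bits), so the chunk takes
  // 16 * 64 + 1 * 8 == 1032 bytes from upstream, aligned to
  // __bit_ceil(64) == 64; the 16 blocks come first and the bitset word
  // occupies the final 8 bytes.
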
  // An oversized allocation that doesn't fit in a pool.
  struct __pool_resource::_BigBlock : big_block
  {
    using big_block::big_block;
  };

  namespace {

  constexpr size_t pool_sizes[] = {
      8, 16, 24,
      32, 48,
      64, 80, 96, 112,
      128, 192,
      256, 320, 384, 448,
      512, 768,
#if __SIZE_WIDTH__ > 16
      1024, 1536,
      2048, 3072,
#if __SIZE_WIDTH__ > 20
      1<<12, 1<<13, 1<<14,
      1<<15, 1<<16, 1<<17,
      1<<20, 1<<21, 1<<22 // 4MB should be enough for anybody
#endif
#endif
  };

  pool_options
  munge_options(pool_options opts)
  {
    // The values in the returned struct may differ from those supplied
    // to the pool resource constructor in that values of zero will be
    // replaced with implementation-defined defaults, and sizes may be
    // rounded to unspecified granularity.

    // max_blocks_per_chunk sets the absolute maximum for the pool resource.
    // Each pool might have a smaller maximum, because pools for very large
    // objects might impose a smaller limit.
    if (opts.max_blocks_per_chunk == 0)
      {
        // Pick a default that depends on the number of bits in size_t.
        opts.max_blocks_per_chunk = __SIZE_WIDTH__ << 8;
      }
    else
      {
        // Round to preferred granularity.
        if (opts.max_blocks_per_chunk < size_t(-4))
          {
            // round up
            opts.max_blocks_per_chunk += 3;
            opts.max_blocks_per_chunk &= ~size_t(3);
          }
        else
          {
            // round down
            opts.max_blocks_per_chunk &= ~size_t(3);
          }
      }

    if (opts.max_blocks_per_chunk > chunk::max_blocks_per_chunk())
      {
        opts.max_blocks_per_chunk = chunk::max_blocks_per_chunk();
      }

    // largest_required_pool_block specifies the largest block size that will
    // be allocated from a pool. Larger allocations will come directly from
    // the upstream resource and so will not be pooled.
    if (opts.largest_required_pool_block == 0)
      {
        // Pick a sensible default that depends on the number of bits in size_t
        // (pools with larger block sizes must be explicitly requested by
        // using a non-zero value for largest_required_pool_block).
        opts.largest_required_pool_block = __SIZE_WIDTH__ << 6;
      }
    else
      {
        // Round to preferred granularity
        static_assert(std::__has_single_bit(pool_sizes[0]));
        constexpr size_t mask = pool_sizes[0] - 1;
        opts.largest_required_pool_block += mask;
        opts.largest_required_pool_block &= ~mask;
      }

    if (opts.largest_required_pool_block < big_block::min)
      {
        opts.largest_required_pool_block = big_block::min;
      }
    else if (opts.largest_required_pool_block > std::end(pool_sizes)[-1])
      {
        // Setting _M_opts to the largest pool allows users to query it:
        opts.largest_required_pool_block = std::end(pool_sizes)[-1];
      }
    return opts;
  }

  inline int
  pool_index(size_t block_size, int npools)
  {
    auto p = std::lower_bound(pool_sizes, pool_sizes + npools, block_size);
    int n = p - pool_sizes;
    if (n != npools)
      return n;
    return -1;
  }

  inline int
  select_num_pools(const pool_options& opts)
  {
    auto p = std::lower_bound(std::begin(pool_sizes), std::end(pool_sizes),
                              opts.largest_required_pool_block);
    const int n = p - std::begin(pool_sizes);
    if (p == std::end(pool_sizes))
      return n;
    return n + 1;
  }

#ifdef _GLIBCXX_HAS_GTHREADS
  using shared_lock = std::shared_lock<shared_mutex>;
  using exclusive_lock = lock_guard<shared_mutex>;
#endif

  } // namespace

  __pool_resource::
  __pool_resource(const pool_options& opts, memory_resource* upstream)
  : _M_opts(munge_options(opts)), _M_unpooled(upstream),
    _M_npools(select_num_pools(_M_opts))
  { }

  __pool_resource::~__pool_resource() { release(); }

  void
  __pool_resource::release() noexcept
  {
    memory_resource* res = resource();
    // deallocate oversize allocations
    for (auto& b : _M_unpooled)
      res->deallocate(b.pointer, b.size(), b.align());
    pmr::vector<_BigBlock>{res}.swap(_M_unpooled);
  }

  void*
  __pool_resource::allocate(size_t bytes, size_t alignment)
  {
    auto& b = _M_unpooled.emplace_back(bytes, alignment);
    __try {
      // N.B. need to allocate b.size(), which might be larger than bytes.
      void* p = resource()->allocate(b.size(), alignment);
      b.pointer = p;
      if (_M_unpooled.size() > 1)
        {
          const auto mid = _M_unpooled.end() - 1;
          // move to right position in vector
          std::rotate(std::lower_bound(_M_unpooled.begin(), mid, p),
                      mid, _M_unpooled.end());
        }
      return p;
    } __catch(...) {
      _M_unpooled.pop_back();
      __throw_exception_again;
    }
  }

  void
  __pool_resource::deallocate(void* p, size_t bytes [[maybe_unused]],
                              size_t alignment [[maybe_unused]])
  {
    const auto it
      = std::lower_bound(_M_unpooled.begin(), _M_unpooled.end(), p);
    __glibcxx_assert(it != _M_unpooled.end() && it->pointer == p);
    if (it != _M_unpooled.end() && it->pointer == p) // [[likely]]
      {
        const auto b = *it;
        __glibcxx_assert(b.size() == b.alloc_size(bytes));
        __glibcxx_assert(b.align() == alignment);
        _M_unpooled.erase(it);
        // N.B. need to deallocate b.size(), which might be larger than bytes.
        resource()->deallocate(p, b.size(), b.align());
      }
  }

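  // For reference (illustrative, not in the original source): on a target
  // with __SIZE_WIDTH__ == 64 the munge_options() defaults above come out
  // as max_blocks_per_chunk == 64 << 8 == 16384 and
  // largest_required_pool_block == 64 << 6 == 4096, and a lookup such as
  // pool_index(100, npools) selects the first pool size not less than 100,
  // i.e. the 112-byte pool.
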
  // Create array of pools, allocated from upstream resource.
  auto
  __pool_resource::_M_alloc_pools()
  -> _Pool*
  {
    polymorphic_allocator<_Pool> alloc{resource()};
    _Pool* p = alloc.allocate(_M_npools);
    for (int i = 0; i < _M_npools; ++i)
      {
        // For last pool use largest_required_pool_block
        const size_t block_size = (i+1 == _M_npools)
          ? _M_opts.largest_required_pool_block
          : pool_sizes[i];

        // Decide on initial number of blocks per chunk.
        // At least 16 blocks per chunk seems reasonable,
        // more for smaller blocks:
        size_t blocks_per_chunk = std::max(size_t(16), 1024 / block_size);
        // But don't exceed the requested max_blocks_per_chunk:
        blocks_per_chunk
          = std::min(blocks_per_chunk, _M_opts.max_blocks_per_chunk);
        // Allow space for bitset to track which blocks are used/unused:
        blocks_per_chunk *= 1 - 1.0 / (__CHAR_BIT__ * block_size);
        // Construct a _Pool for the given block size and initial chunk size:
        alloc.construct(p + i, block_size, blocks_per_chunk);
      }
    return p;
  }

#ifdef _GLIBCXX_HAS_GTHREADS
  // synchronized_pool_resource members.

  /* Notes on implementation and thread safety:
   *
   * Each synchronized_pool_resource manages a linked list of N+1 _TPools
   * objects, where N is the number of threads using the pool resource.
   * Each _TPools object has its own set of pools, with their own chunks.
   * The first element of the list, _M_tpools[0], can be used by any thread.
   * The rest of the list contains a _TPools object for each thread,
   * accessed via the thread-specific key _M_key (and referred to for
   * exposition as _M_tpools[_M_key]).
   * The first element, _M_tpools[0], contains "orphaned chunks" which were
   * allocated by a thread which has since exited, and so there is no
   * _M_tpools[_M_key] for that thread. Orphaned chunks are never reused,
   * they're only held in _M_tpools[0] so they can be deallocated.
   * A thread can access its own thread-specific set of pools via _M_key
   * while holding a shared lock on _M_mx. Accessing _M_impl._M_unpooled
   * or _M_tpools[0] or any other thread's _M_tpools[_M_key] requires an
   * exclusive lock.
   * The upstream_resource() pointer can be obtained without a lock, but
   * any dereference of that pointer requires an exclusive lock.
   * The _M_impl._M_opts and _M_impl._M_npools members are immutable,
   * and can safely be accessed concurrently.
   *
   * In a single-threaded program (i.e. __gthread_active_p() == false)
   * the pool resource only needs one set of pools and never has orphaned
   * chunks, so just uses _M_tpools[0] directly, and _M_tpools->next is null.
   */

  extern "C" {
    static void destroy_TPools(void*);
  }

  struct synchronized_pool_resource::_TPools
  {
    // Exclusive lock must be held in the thread where this constructor runs.
    explicit
    _TPools(synchronized_pool_resource& owner, exclusive_lock&)
    : owner(owner), pools(owner._M_impl._M_alloc_pools())
    {
      // __builtin_printf("%p constructing\n", this);
      __glibcxx_assert(pools);
    }

    // Exclusive lock must be held in the thread where this destructor runs.
    ~_TPools()
    {
      __glibcxx_assert(pools);
      if (pools)
        {
          memory_resource* r = owner.upstream_resource();
          for (int i = 0; i < owner._M_impl._M_npools; ++i)
            pools[i].release(r);
          std::destroy_n(pools, owner._M_impl._M_npools);
          polymorphic_allocator<__pool_resource::_Pool> a(r);
          a.deallocate(pools, owner._M_impl._M_npools);
        }
      if (prev)
        prev->next = next;
      if (next)
        next->prev = prev;
    }

    // Exclusive lock must be held in the thread where this function runs.
    void move_nonempty_chunks()
    {
      __glibcxx_assert(pools);
      __glibcxx_assert(__gthread_active_p());
      if (!pools)
        return;
      memory_resource* const r = owner.upstream_resource();
      auto* const shared = owner._M_tpools->pools;
      // move all non-empty chunks to the shared _TPools
      for (int i = 0; i < owner._M_impl._M_npools; ++i)
        for (auto& c : pools[i]._M_chunks)
          if (!c.empty())
            shared[i]._M_chunks.insert(std::move(c), r);
    }

    synchronized_pool_resource& owner;
    __pool_resource::_Pool* pools = nullptr;
    _TPools* prev = nullptr;
    _TPools* next = nullptr;

    static void destroy(_TPools* p)
    {
      exclusive_lock l(p->owner._M_mx);
      // __glibcxx_assert(p != p->owner._M_tpools);
      p->move_nonempty_chunks();
      polymorphic_allocator<_TPools> a(p->owner.upstream_resource());
      p->~_TPools();
      a.deallocate(p, 1);
    }
  };

  // Called when a thread exits
  extern "C" {
    static void destroy_TPools(void* p)
    {
      using _TPools = synchronized_pool_resource::_TPools;
      _TPools::destroy(static_cast<_TPools*>(p));
    }
  }

  // Constructor
  synchronized_pool_resource::
  synchronized_pool_resource(const pool_options& opts,
                             memory_resource* upstream)
  : _M_impl(opts, upstream)
  {
    if (__gthread_active_p())
      if (int err = __gthread_key_create(&_M_key, destroy_TPools))
        __throw_system_error(err);
    exclusive_lock l(_M_mx);
    _M_tpools = _M_alloc_shared_tpools(l);
  }

  // Destructor
  synchronized_pool_resource::~synchronized_pool_resource()
  {
    release();
    if (__gthread_active_p())
      __gthread_key_delete(_M_key); // does not run destroy_TPools
  }

  void
  synchronized_pool_resource::release()
  {
    exclusive_lock l(_M_mx);
    if (_M_tpools)
      {
        if (__gthread_active_p())
          {
            __gthread_key_delete(_M_key); // does not run destroy_TPools
            __gthread_key_create(&_M_key, destroy_TPools);
          }
        polymorphic_allocator<_TPools> a(upstream_resource());
        // destroy+deallocate each _TPools
        do
          {
            _TPools* p = _M_tpools;
            _M_tpools = _M_tpools->next;
            p->~_TPools();
            a.deallocate(p, 1);
          }
        while (_M_tpools);
      }
    // release unpooled memory
    _M_impl.release();
  }

  // Caller must hold shared or exclusive lock to ensure the pointer
  // isn't invalidated before it can be used.
  auto
  synchronized_pool_resource::_M_thread_specific_pools() noexcept
  {
    __pool_resource::_Pool* pools = nullptr;
    __glibcxx_assert(__gthread_active_p());
    if (auto tp = static_cast<_TPools*>(__gthread_getspecific(_M_key)))
      {
        pools = tp->pools;
        // __glibcxx_assert(tp->pools);
      }
    return pools;
  }

  // Override for memory_resource::do_allocate
  void*
  synchronized_pool_resource::
  do_allocate(size_t bytes, size_t alignment)
  {
    const auto block_size = std::max(bytes, alignment);
    const pool_options opts = _M_impl._M_opts;
    if (block_size <= opts.largest_required_pool_block)
      {
        const ptrdiff_t index = pool_index(block_size, _M_impl._M_npools);
        if (__gthread_active_p())
          {
            // Try to allocate from the thread-specific pool.
            shared_lock l(_M_mx);
            if (auto pools = _M_thread_specific_pools()) // [[likely]]
              {
                // Need exclusive lock to replenish so use try_allocate:
                if (void* p = pools[index].try_allocate())
                  return p;
                // Need to take exclusive lock and replenish pool.
              }
            // Need to allocate or replenish thread-specific pools using
            // upstream resource, so need to hold exclusive lock.
          }
        else // single-threaded
          {
            if (!_M_tpools) // [[unlikely]]
              {
                exclusive_lock dummy(_M_mx);
                _M_tpools = _M_alloc_shared_tpools(dummy);
              }
            return _M_tpools->pools[index].allocate(upstream_resource(), opts);
          }

        // N.B. Another thread could call release() now that the lock is
        // not held.
        exclusive_lock excl(_M_mx);
        if (!_M_tpools) // [[unlikely]]
          _M_tpools = _M_alloc_shared_tpools(excl);
        auto pools = _M_thread_specific_pools();
        if (!pools)
          pools = _M_alloc_tpools(excl)->pools;
        return pools[index].allocate(upstream_resource(), opts);
      }
    exclusive_lock l(_M_mx);
    return _M_impl.allocate(bytes, alignment); // unpooled allocation
  }

  // Override for memory_resource::do_deallocate
  void
  synchronized_pool_resource::
  do_deallocate(void* p, size_t bytes, size_t alignment)
  {
    size_t block_size = std::max(bytes, alignment);
    if (block_size <= _M_impl._M_opts.largest_required_pool_block)
      {
        const ptrdiff_t index = pool_index(block_size, _M_impl._M_npools);
        __glibcxx_assert(index != -1);
        if (__gthread_active_p())
          {
            shared_lock l(_M_mx);
            if (auto pools = _M_thread_specific_pools())
              {
                // No need to lock here, no other thread is accessing this pool.
                if (pools[index].deallocate(upstream_resource(), p))
                  return;
              }
            // Block might have come from a different thread's pool,
            // take exclusive lock and check every pool.
          }
        else // single-threaded
          {
            __glibcxx_assert(_M_tpools != nullptr);
            if (_M_tpools) // [[likely]]
              _M_tpools->pools[index].deallocate(upstream_resource(), p);
            return;
          }

        // TODO store {p, bytes, alignment} somewhere and defer returning
        // the block to the correct thread-specific pool until we next
        // take the exclusive lock.

        exclusive_lock excl(_M_mx);
        auto my_pools = _M_thread_specific_pools();
        for (_TPools* t = _M_tpools; t != nullptr; t = t->next)
          {
            if (t->pools != my_pools)
              if (t->pools) // [[likely]]
                {
                  if (t->pools[index].deallocate(upstream_resource(), p))
                    return;
                }
          }
        // Not necessarily an error to reach here, release() could have been
        // called on another thread between releasing the shared lock and
        // acquiring the exclusive lock.
        return;
      }
    exclusive_lock l(_M_mx);
    _M_impl.deallocate(p, bytes, alignment);
  }

  // Allocate a thread-specific _TPools object and add it to the linked list.
  auto
  synchronized_pool_resource::_M_alloc_tpools(exclusive_lock& l)
  -> _TPools*
  {
    __glibcxx_assert(_M_tpools != nullptr);
    __glibcxx_assert(__gthread_active_p());
    // dump_list(_M_tpools);
    polymorphic_allocator<_TPools> a(upstream_resource());
    _TPools* p = a.allocate(1);
    bool constructed = false;
    __try
      {
        a.construct(p, *this, l);
        constructed = true;
        // __glibcxx_assert(__gthread_getspecific(_M_key) == nullptr);
        if (int err = __gthread_setspecific(_M_key, p))
          __throw_system_error(err);
      }
    __catch(...)
      {
        if (constructed)
          a.destroy(p);
        a.deallocate(p, 1);
        __throw_exception_again;
      }
    p->prev = _M_tpools;
    p->next = _M_tpools->next;
    _M_tpools->next = p;
    if (p->next)
      p->next->prev = p;
    return p;
  }

  // Allocate the shared _TPools object, _M_tpools[0]
  auto
  synchronized_pool_resource::_M_alloc_shared_tpools(exclusive_lock& l)
  -> _TPools*
  {
    __glibcxx_assert(_M_tpools == nullptr);
    polymorphic_allocator<_TPools> a(upstream_resource());
    _TPools* p = a.allocate(1);
    __try
      {
        a.construct(p, *this, l);
      }
    __catch(...)
      {
        a.deallocate(p, 1);
        __throw_exception_again;
      }
    // __glibcxx_assert(p->next == nullptr);
    // __glibcxx_assert(p->prev == nullptr);
    return p;
  }
#endif // _GLIBCXX_HAS_GTHREADS

  // unsynchronized_pool_resource member functions

  // Constructor
  unsynchronized_pool_resource::
  unsynchronized_pool_resource(const pool_options& opts,
                               memory_resource* upstream)
  : _M_impl(opts, upstream), _M_pools(_M_impl._M_alloc_pools())
  { }

  // Destructor
  unsynchronized_pool_resource::~unsynchronized_pool_resource()
  { release(); }

  // Return all memory to upstream resource.
  void
  unsynchronized_pool_resource::release()
  {
    // release pooled memory
    if (_M_pools)
      {
        memory_resource* res = upstream_resource();
        polymorphic_allocator<_Pool> alloc{res};
        for (int i = 0; i < _M_impl._M_npools; ++i)
          {
            _M_pools[i].release(res);
            alloc.destroy(_M_pools + i);
          }
        alloc.deallocate(_M_pools, _M_impl._M_npools);
        _M_pools = nullptr;
      }

    // release unpooled memory
    _M_impl.release();
  }

  // Find the right pool for a block of size block_size.
  auto
  unsynchronized_pool_resource::_M_find_pool(size_t block_size) noexcept
  {
    __pool_resource::_Pool* pool = nullptr;
    if (_M_pools) // [[likely]]
      {
        int index = pool_index(block_size, _M_impl._M_npools);
        if (index != -1)
          pool = _M_pools + index;
      }
    return pool;
  }

  // Override for memory_resource::do_allocate
  void*
  unsynchronized_pool_resource::do_allocate(size_t bytes, size_t alignment)
  {
    const auto block_size = std::max(bytes, alignment);
    if (block_size <= _M_impl._M_opts.largest_required_pool_block)
      {
        // Recreate pools if release() has been called:
        if (__builtin_expect(_M_pools == nullptr, false))
          _M_pools = _M_impl._M_alloc_pools();
        if (auto pool = _M_find_pool(block_size))
          return pool->allocate(upstream_resource(), _M_impl._M_opts);
      }
    return _M_impl.allocate(bytes, alignment);
  }

  // Override for memory_resource::do_deallocate
  void
  unsynchronized_pool_resource::
  do_deallocate(void* p, size_t bytes, size_t alignment)
  {
    size_t block_size = std::max(bytes, alignment);
    if (block_size <= _M_impl._M_opts.largest_required_pool_block)
      {
        if (auto pool = _M_find_pool(block_size))
          {
            pool->deallocate(upstream_resource(), p);
            return;
          }
      }
    _M_impl.deallocate(p, bytes, alignment);
  }

} // namespace pmr
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std
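
// Example of how the resources defined above are typically exercised
// (illustrative only, not part of this translation unit):
//
//   #include <memory_resource>
//   #include <vector>
//
//   int main()
//   {
//     std::pmr::unsynchronized_pool_resource pool; // upstream is the default resource
//     {
//       std::pmr::vector<int> v(&pool); // allocations are served by the pool
//       v.resize(1000);
//     }                                 // blocks go back to the pool, not upstream
//     pool.release();                   // all chunks are returned upstream here
//   }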