// <memory_resource> implementation -*- C++ -*-

// Copyright (C) 2018-2019 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

#include <memory_resource>
#include <algorithm>			// lower_bound, rotate
#include <atomic>
#include <bit>				// __ceil2, __log2p1
#include <new>
#if ATOMIC_POINTER_LOCK_FREE != 2
# include <bits/std_mutex.h>	// std::mutex, std::lock_guard
# include <bits/move.h>		// std::exchange
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
namespace pmr
{
  // This was defined inline in 9.1 and 9.2 so code compiled by those
  // versions will not use this symbol.
  memory_resource::~memory_resource() = default;

  namespace
  {
    class newdel_res_t final : public memory_resource
    {
      void*
      do_allocate(size_t __bytes, size_t __alignment) override
      { return ::operator new(__bytes, std::align_val_t(__alignment)); }

      void
      do_deallocate(void* __p, size_t __bytes, size_t __alignment) noexcept
      override
      { ::operator delete(__p, __bytes, std::align_val_t(__alignment)); }

      bool
      do_is_equal(const memory_resource& __other) const noexcept override
      { return &__other == this; }
    };

    class null_res_t final : public memory_resource
    {
      void*
      do_allocate(size_t, size_t) override
      { std::__throw_bad_alloc(); }

      void
      do_deallocate(void*, size_t, size_t) noexcept override
      { }

      bool
      do_is_equal(const memory_resource& __other) const noexcept override
      { return &__other == this; }
    };

    template<typename T>
      struct constant_init
      {
	union {
	  unsigned char unused;
	  T obj;
	};
	constexpr constant_init() : obj() { }

	template<typename U>
	  explicit constexpr constant_init(U arg) : obj(arg) { }

	~constant_init() { /* do nothing, union member is not destroyed */ }
      };

    constant_init<newdel_res_t> newdel_res{};
    constant_init<null_res_t> null_res{};
#if ATOMIC_POINTER_LOCK_FREE == 2
    using atomic_mem_res = atomic<memory_resource*>;
# define _GLIBCXX_ATOMIC_MEM_RES_CAN_BE_CONSTANT_INITIALIZED
#elif defined(_GLIBCXX_HAS_GTHREADS)
    // Can't use pointer-width atomics, define a type using a mutex instead:
    struct atomic_mem_res
    {
# ifdef __GTHREAD_MUTEX_INIT
#  define _GLIBCXX_ATOMIC_MEM_RES_CAN_BE_CONSTANT_INITIALIZED
      // std::mutex has constexpr constructor
      constexpr
# endif
      atomic_mem_res(memory_resource* r) : val(r) { }

      mutex mx;
      memory_resource* val;

      memory_resource* load()
      {
	lock_guard<mutex> lock(mx);
	return val;
      }

      memory_resource* exchange(memory_resource* r)
      {
	lock_guard<mutex> lock(mx);
	return std::exchange(val, r);
      }
    };
#else
# define _GLIBCXX_ATOMIC_MEM_RES_CAN_BE_CONSTANT_INITIALIZED
    // Single-threaded, no need for synchronization
    struct atomic_mem_res
    {
      constexpr
      atomic_mem_res(memory_resource* r) : val(r) { }

      memory_resource* val;

      memory_resource* load() const
      {
	return val;
      }

      memory_resource* exchange(memory_resource* r)
      {
	return std::exchange(val, r);
      }
    };
#endif // ATOMIC_POINTER_LOCK_FREE == 2

#ifdef _GLIBCXX_ATOMIC_MEM_RES_CAN_BE_CONSTANT_INITIALIZED
    constant_init<atomic_mem_res> default_res{&newdel_res.obj};
#else
# include "default_resource.h"
#endif
  } // namespace

  memory_resource*
  new_delete_resource() noexcept
  { return &newdel_res.obj; }

  memory_resource*
  null_memory_resource() noexcept
  { return &null_res.obj; }

  memory_resource*
  set_default_resource(memory_resource* r) noexcept
  {
    if (r == nullptr)
      r = new_delete_resource();
    return default_res.obj.exchange(r);
  }

  memory_resource*
  get_default_resource() noexcept
  { return default_res.obj.load(); }

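  // Example (exposition only): set_default_resource() returns the previous
  // default resource, so a caller can temporarily install a different one:
  //
  //   memory_resource* prev = set_default_resource(null_memory_resource());
  //   // get_default_resource() now returns null_memory_resource(), so any
  //   // allocation that falls back to the default will throw bad_alloc.
  //   set_default_resource(prev);   // restore the previous default
  //
  // Passing a null pointer is equivalent to passing new_delete_resource().
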
  // Member functions for std::pmr::monotonic_buffer_resource

  // This was defined inline in 9.1 and 9.2 so code compiled by those
  // versions will not use this symbol.
  monotonic_buffer_resource::~monotonic_buffer_resource() { release(); }

  // Memory allocated by the upstream resource is managed in a linked list
  // of _Chunk objects. A _Chunk object recording the size and alignment of
  // the allocated block and a pointer to the previous chunk is placed
  // at the end of the block.
  class monotonic_buffer_resource::_Chunk
  {
  public:
    // Return the address and size of a block of memory allocated from __r,
    // of at least __size bytes and aligned to __align.
    // Add a new _Chunk to the front of the linked list at __head.
    static pair<void*, size_t>
    allocate(memory_resource* __r, size_t __size, size_t __align,
	     _Chunk*& __head)
    {
      __size = std::__ceil2(__size + sizeof(_Chunk));

      if constexpr (alignof(_Chunk) > 1)
	{
	  // PR libstdc++/90046
	  // For targets like epiphany-elf where alignof(_Chunk) != 1
	  // ensure that the last sizeof(_Chunk) bytes in the buffer
	  // are suitably-aligned for a _Chunk.
	  // This should be unnecessary, because the caller already
	  // passes in max(__align, alignof(max_align_t)).
	  if (__align < alignof(_Chunk))
	    __align = alignof(_Chunk);
	}

      void* __p = __r->allocate(__size, __align);

      // Add a chunk defined by (__p, __size, __align) to linked list __head.
      void* const __back = (char*)__p + __size - sizeof(_Chunk);
      __head = ::new(__back) _Chunk(__size, __align, __head);
      return { __p, __size - sizeof(_Chunk) };
    }

    // Return every chunk in linked list __head to resource __r.
    static void
    release(_Chunk*& __head, memory_resource* __r) noexcept
    {
      _Chunk* __next = __head;
      __head = nullptr;
      while (__next)
	{
	  _Chunk* __ch = __next;
	  __builtin_memcpy(&__next, __ch->_M_next, sizeof(_Chunk*));

	  __glibcxx_assert(__ch->_M_canary != 0);
	  __glibcxx_assert(__ch->_M_canary == (__ch->_M_size|__ch->_M_align));

	  if (__ch->_M_canary != (__ch->_M_size | __ch->_M_align))
	    return; // buffer overflow detected!

	  size_t __size = (1u << __ch->_M_size);
	  size_t __align = (1u << __ch->_M_align);
	  void* __start = (char*)(__ch + 1) - __size;
	  __r->deallocate(__start, __size, __align);
	}
    }

  private:
    _Chunk(size_t __size, size_t __align, _Chunk* __next) noexcept
    : _M_size(std::__log2p1(__size) - 1),
      _M_align(std::__log2p1(__align) - 1)
    {
      __builtin_memcpy(_M_next, &__next, sizeof(__next));
      _M_canary = _M_size | _M_align;
    }

    unsigned char _M_canary;
    unsigned char _M_size;
    unsigned char _M_align;
    unsigned char _M_next[sizeof(_Chunk*)];
  };
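
  // Worked example (exposition only, assuming a typical 64-bit target where
  // sizeof(_Chunk*) == 8 and therefore sizeof(_Chunk) == 11):
  // _Chunk::allocate(__r, 1000, 16, __head) rounds 1000 + 11 == 1011 up to
  // the next power of two, 1024, and requests a 1024-byte block from __r.
  // The 11-byte _Chunk record occupies the last 11 bytes of that block
  // (storing __log2p1(1024) - 1 == 10 in _M_size), and {__p, 1013} is
  // returned, so the monotonic_buffer_resource can hand out 1013 bytes.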

  void
  monotonic_buffer_resource::_M_new_buffer(size_t bytes, size_t alignment)
  {
    const size_t n = std::max(bytes, _M_next_bufsiz);
    const size_t m = std::max(alignment, alignof(std::max_align_t));
    auto [p, size] = _Chunk::allocate(_M_upstream, n, m, _M_head);
    _M_current_buf = p;
    _M_avail = size;
    _M_next_bufsiz *= _S_growth_factor;
  }

  void
  monotonic_buffer_resource::_M_release_buffers() noexcept
  {
    _Chunk::release(_M_head, _M_upstream);
  }

  // Helper types for synchronized_pool_resource & unsynchronized_pool_resource

  namespace {

  // Simple bitset with runtime size.
  // Tracks which blocks in a pool chunk are used/unused.
  struct bitset
  {
    using word = uint64_t;
    using size_type // unsigned integer type with no more than 32 bits
      = conditional_t<numeric_limits<size_t>::digits <= 32, size_t, uint32_t>;

    static constexpr unsigned bits_per_word = numeric_limits<word>::digits;

    // The bitset does not own p
    bitset(void* p, size_type num_blocks)
    : _M_words(static_cast<word*>(p)), _M_size(num_blocks),
      _M_next_word(0)
    {
      const size_type last_word = num_blocks / bits_per_word;
      __builtin_memset(_M_words, 0, last_word * sizeof(*_M_words));
      // Set bits beyond _M_size, so they are not treated as free blocks:
      if (const size_type extra_bits = num_blocks % bits_per_word)
	_M_words[last_word] = word(-1) << extra_bits;
      __glibcxx_assert( empty() );
      __glibcxx_assert( free() == num_blocks );
    }

    bitset() = default;
    ~bitset() = default;

    // Number of blocks
    size_type size() const noexcept { return _M_size; }

    // Number of free blocks (unset bits)
    size_type free() const noexcept
    {
      size_type n = 0;
      for (size_type i = _M_next_word; i < nwords(); ++i)
	n += (bits_per_word - std::__popcount(_M_words[i]));
      return n;
    }

    // True if there are no free blocks (all bits are set)
    bool full() const noexcept
    {
      if (_M_next_word >= nwords())
	return true;
      // For a bitset with size() > (max_blocks_per_chunk() - 64) we will
      // have nwords() == (max_word_index() + 1) and so _M_next_word will
      // never be equal to nwords().
      // In that case, check if the last word is full:
      if (_M_next_word == max_word_index())
	return _M_words[_M_next_word] == word(-1);
      return false;
    }

    // True if size() != 0 and all blocks are free (no bits are set).
    bool empty() const noexcept
    {
      if (nwords() == 0)
	return false;
      if (_M_next_word != 0)
	return false;
      for (size_type i = 0; i < nwords() - 1; ++i)
	if (_M_words[i] != 0)
	  return false;
      word last = _M_words[nwords() - 1];
      if (const size_type extra_bits = size() % bits_per_word)
	last <<= (bits_per_word - extra_bits);
      return last == 0;
    }

    void reset() noexcept
    {
      _M_words = nullptr;
      _M_size = _M_next_word = 0;
    }

    bool operator[](size_type n) const noexcept
    {
      __glibcxx_assert( n < _M_size );
      const size_type wd = n / bits_per_word;
      const word bit = word(1) << (n % bits_per_word);
      return _M_words[wd] & bit;
    }

    size_type get_first_unset() noexcept
    {
      const size_type wd = _M_next_word;
      if (wd < nwords())
	{
	  const size_type n = std::__countr_one(_M_words[wd]);
	  if (n < bits_per_word)
	    {
	      const word bit = word(1) << n;
	      _M_words[wd] |= bit;
	      update_next_word();
	      return (wd * bits_per_word) + n;
	    }
	}
      return size_type(-1);
    }

    void set(size_type n) noexcept
    {
      __glibcxx_assert( n < _M_size );
      const size_type wd = n / bits_per_word;
      const word bit = word(1) << (n % bits_per_word);
      _M_words[wd] |= bit;
      if (wd == _M_next_word)
	update_next_word();
    }

    void clear(size_type n) noexcept
    {
      __glibcxx_assert( n < _M_size );
      const size_type wd = n / bits_per_word;
      const word bit = word(1) << (n % bits_per_word);
      _M_words[wd] &= ~bit;
      if (wd < _M_next_word)
	_M_next_word = wd;
    }

    // Update _M_next_word to refer to the next word with an unset bit.
    // The size of the _M_next_word bit-field means it cannot represent
    // the maximum possible nwords() value. To avoid wraparound to zero
    // this function saturates _M_next_word at max_word_index().
    void update_next_word() noexcept
    {
      size_type next = _M_next_word;
      while (_M_words[next] == word(-1) && ++next < nwords())
	{ }
      _M_next_word = std::min(next, max_word_index());
    }

    void swap(bitset& b) noexcept
    {
      std::swap(_M_words, b._M_words);
      size_type tmp = _M_size;
      _M_size = b._M_size;
      b._M_size = tmp;
      tmp = _M_next_word;
      _M_next_word = b._M_next_word;
      b._M_next_word = tmp;
    }

    size_type nwords() const noexcept
    { return (_M_size + bits_per_word - 1) / bits_per_word; }

    // Maximum value that can be stored in bitset::_M_size member (approx 500k)
    static constexpr size_type max_blocks_per_chunk() noexcept
    { return (size_type(1) << _S_size_digits) - 1; }

    // Maximum value that can be stored in bitset::_M_next_word member (8191).
    static constexpr size_type max_word_index() noexcept
    { return (max_blocks_per_chunk() + bits_per_word - 1) / bits_per_word; }

    word* data() const noexcept { return _M_words; }

  private:
    static constexpr unsigned _S_size_digits
      = (numeric_limits<size_type>::digits
	  + std::__log2p1(bits_per_word) - 1) / 2;

    word* _M_words = nullptr;
    // Number of blocks represented by the bitset:
    size_type _M_size : _S_size_digits;
    // Index of the first word with unset bits:
    size_type _M_next_word : numeric_limits<size_type>::digits - _S_size_digits;
  };
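
  // Worked example (exposition only): with 64-bit words, a bitset covering
  // 100 blocks uses nwords() == 2 words.  The constructor zeroes word 0 and
  // then sets the 28 high bits of word 1 (bits 36..63), so exactly 100 bits
  // remain clear and free() == 100.  get_first_unset() marks the lowest
  // clear bit and returns its index; clear(n) makes block n available again
  // and moves _M_next_word back if necessary.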

  // A "chunk" belonging to a pool.
  // A chunk contains many blocks of the same size.
  // Derived from bitset to reuse its tail-padding.
  struct chunk : bitset
  {
    chunk() = default;

    // p points to the start of a chunk of size bytes in length.
    // The chunk has space for n blocks, followed by a bitset of size n
    // that begins at address words.
    // This object does not own p or words, the caller will free it.
    chunk(void* p, uint32_t bytes, void* words, size_t n)
    : bitset(words, n),
      _M_bytes(bytes),
      _M_p(static_cast<std::byte*>(p))
    { __glibcxx_assert(bytes <= chunk::max_bytes_per_chunk()); }

    chunk(chunk&& c) noexcept
    : bitset(std::move(c)), _M_bytes(c._M_bytes), _M_p(c._M_p)
    {
      c._M_bytes = 0;
      c._M_p = nullptr;
      c.reset();
    }

    chunk& operator=(chunk&& c) noexcept
    {
      swap(c);
      return *this;
    }

    // Allocated size of chunk:
    uint32_t _M_bytes = 0;
    // Start of allocated chunk:
    std::byte* _M_p = nullptr;

    // True if there are no free blocks in this chunk
    using bitset::full;
    // Number of blocks in this chunk
    using bitset::size;

    static constexpr uint32_t max_bytes_per_chunk() noexcept
    { return numeric_limits<decltype(_M_bytes)>::max(); }

    // Determine if block with address p and size block_size
    // is contained within this chunk.
    bool owns(void* p, size_t block_size)
    {
      std::less_equal<uintptr_t> less_equal;
      return less_equal(reinterpret_cast<uintptr_t>(_M_p),
			reinterpret_cast<uintptr_t>(p))
	&& less_equal(reinterpret_cast<uintptr_t>(p) + block_size,
		      reinterpret_cast<uintptr_t>(bitset::data()));
    }

    // Allocate next available block of block_size bytes from this chunk.
    void* reserve(size_t block_size) noexcept
    {
      const size_type n = get_first_unset();
      if (n == size_type(-1))
	return nullptr;
      return _M_p + (n * block_size);
    }

    // Deallocate a single block of block_size bytes
    void release(void* vp, size_t block_size)
    {
      __glibcxx_assert( owns(vp, block_size) );
      const size_t offset = static_cast<std::byte*>(vp) - _M_p;
      // Pointer is correctly aligned for a block in this chunk:
      __glibcxx_assert( (offset % block_size) == 0 );
      // Block has been allocated:
      __glibcxx_assert( (*this)[offset / block_size] == true );
      bitset::clear(offset / block_size);
    }

    // Deallocate a single block if it belongs to this chunk.
    bool try_release(void* p, size_t block_size)
    {
      if (!owns(p, block_size))
	return false;
      release(p, block_size);
      return true;
    }

    void swap(chunk& c) noexcept
    {
      std::swap(_M_bytes, c._M_bytes);
      std::swap(_M_p, c._M_p);
      bitset::swap(c);
    }

    bool operator<(const chunk& c) const noexcept
    { return std::less<const void*>{}(_M_p, c._M_p); }

    friend void swap(chunk& l, chunk& r) { l.swap(r); }

    friend bool operator<(const void* p, const chunk& c) noexcept
    { return std::less<const void*>{}(p, c._M_p); }
  };

  // For 64-bit pointers this is the size of three pointers i.e. 24 bytes.
  // For 32-bit and 20-bit pointers it's four pointers (16 bytes).
  // For 16-bit pointers it's five pointers (10 bytes).
  // TODO pad 64-bit to 4*sizeof(void*) to avoid splitting across cache lines?
  static_assert(sizeof(chunk)
      == sizeof(bitset::size_type) + sizeof(uint32_t) + 2 * sizeof(void*));

  // An oversized allocation that doesn't fit in a pool.
  struct big_block
  {
    // Alignment must be a power-of-two so we only need to use enough bits
    // to store the power, not the actual value:
    static constexpr unsigned _S_alignbits
      = std::__log2p1((unsigned)numeric_limits<size_t>::digits - 1);
    // Use the remaining bits to store the size:
    static constexpr unsigned _S_sizebits
      = numeric_limits<size_t>::digits - _S_alignbits;
    // The maximum value that can be stored in the _M_size bit-field
    static constexpr size_t all_ones = size_t(-1) >> _S_alignbits;
    // The minimum size of a big block (smaller sizes will be rounded up).
    static constexpr size_t min = 1u << _S_alignbits;

    big_block(size_t bytes, size_t alignment)
    : _M_size(alloc_size(bytes) >> _S_alignbits),
      _M_align_exp(std::__log2p1(alignment) - 1u)
    { }

    void* pointer = nullptr;
    size_t _M_size : numeric_limits<size_t>::digits - _S_alignbits;
    size_t _M_align_exp : _S_alignbits;

    size_t size() const noexcept
    {
      // If all bits are set in _M_size it means the maximum possible size:
      if (__builtin_expect(_M_size == (size_t(-1) >> _S_alignbits), false))
	return (size_t)-1;
      else
	return _M_size << _S_alignbits;
    }

    size_t align() const noexcept { return size_t(1) << _M_align_exp; }

    // Calculate size to be allocated instead of requested number of bytes.
    // The requested value will be rounded up to a multiple of big_block::min,
    // so the low _S_alignbits bits are all zero and don't need to be stored.
    static constexpr size_t alloc_size(size_t bytes) noexcept
    {
      const size_t s = bytes + min - 1u;
      if (__builtin_expect(s < bytes, false))
	return size_t(-1); // addition wrapped past zero, return max value
      else
	return s & ~(min - 1u);
    }

    friend bool operator<(void* p, const big_block& b) noexcept
    { return less<void*>{}(p, b.pointer); }

    friend bool operator<(const big_block& b, void* p) noexcept
    { return less<void*>{}(b.pointer, p); }
  };

  static_assert(sizeof(big_block) == (2 * sizeof(void*)));
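
  // Worked example (exposition only, assuming 64-bit size_t): _S_alignbits
  // is __log2p1(63) == 6, so big_block::min == 64.  A request for 1000 bytes
  // with alignment 32 stores alloc_size(1000) >> 6 == 1024 >> 6 == 16 in
  // _M_size and __log2p1(32) - 1 == 5 in _M_align_exp, so size() == 1024
  // and align() == 32, packed into a single size_t alongside the pointer.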

  } // namespace

  // A pool that serves blocks of a particular size.
  // Each pool manages a number of chunks.
  // When a pool is full it is replenished by allocating another chunk.
  struct __pool_resource::_Pool
  {
    // Smallest supported block size
    static constexpr unsigned _S_min_block
      = std::max(sizeof(void*), alignof(bitset::word));

    _Pool(size_t __block_size, size_t __blocks_per_chunk)
    : _M_chunks(),
      _M_block_sz(__block_size),
      _M_blocks_per_chunk(__blocks_per_chunk)
    { }

    // Must call release(r) before destruction!
    ~_Pool() { __glibcxx_assert(_M_chunks.empty()); }

    _Pool(_Pool&&) noexcept = default;
    _Pool& operator=(_Pool&&) noexcept = default;

    // Size of blocks in this pool
    size_t block_size() const noexcept
    { return _M_block_sz; }

    // Allocate a block if the pool is not full, otherwise return null.
    void* try_allocate() noexcept
    {
      const size_t blocksz = block_size();
      if (!_M_chunks.empty())
	{
	  auto& last = _M_chunks.back();
	  if (void* p = last.reserve(blocksz))
	    return p;
	  // TODO last is full, so move another chunk to the back instead?
	  for (auto it = _M_chunks.begin(); it != &last; ++it)
	    if (void* p = it->reserve(blocksz))
	      return p;
	}
      return nullptr;
    }

    // Allocate a block from the pool, replenishing from upstream if needed.
    void* allocate(memory_resource* r, const pool_options& opts)
    {
      if (void* p = try_allocate())
	return p;
      replenish(r, opts);
      return _M_chunks.back().reserve(block_size());
    }

    // Return a block to the pool.
    bool deallocate(memory_resource*, void* p)
    {
      const size_t blocksz = block_size();
      if (__builtin_expect(!_M_chunks.empty(), true))
	{
	  auto& last = _M_chunks.back();
	  if (last.try_release(p, blocksz))
	    return true;
	  auto it = std::upper_bound(_M_chunks.begin(), &last, p);
	  if (it != _M_chunks.begin())
	    {
	      it--;
	      if (it->try_release(p, blocksz))
		// If chunk is empty could return to upstream, but we don't
		// currently do that. Pools only increase in size.
		return true;
	    }
	}
      return false;
    }

    void replenish(memory_resource* __r, const pool_options& __opts)
    {
      using word = chunk::word;
      const size_t __blocks = _M_blocks_per_chunk;
      const auto __bits = chunk::bits_per_word;
      const size_t __words = (__blocks + __bits - 1) / __bits;
      const size_t __block_size = block_size();
      size_t __bytes = __blocks * __block_size + __words * sizeof(word);
      size_t __alignment = std::__ceil2(__block_size);
      void* __p = __r->allocate(__bytes, __alignment);
      __try
	{
	  size_t __n = __blocks * __block_size;
	  void* __pwords = static_cast<char*>(__p) + __n;
	  _M_chunks.insert(chunk(__p, __bytes, __pwords, __blocks), __r);
	}
      __catch (...)
	{
	  __r->deallocate(__p, __bytes, __alignment);
	}
      if (_M_blocks_per_chunk < __opts.max_blocks_per_chunk)
	{
	  const size_t max_blocks
	    = (chunk::max_bytes_per_chunk() - sizeof(word))
	    / (__block_size + 0.125);
	  _M_blocks_per_chunk = std::min({
	      max_blocks,
	      __opts.max_blocks_per_chunk,
	      (size_t)_M_blocks_per_chunk * 2
	  });
	}
    }
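
    // Worked example (exposition only): for block_size() == 64 and
    // _M_blocks_per_chunk == 128, replenish() needs two 64-bit bitset words,
    // so it requests 128 * 64 + 2 * 8 == 8208 bytes, aligned to
    // __ceil2(64) == 64.  The first 8192 bytes become 128 blocks and the
    // last 16 bytes hold the bitset.  _M_blocks_per_chunk then doubles
    // (up to the limits computed above), so each pool grows geometrically.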

    void release(memory_resource* __r)
    {
      const size_t __alignment = std::__ceil2(block_size());
      for (auto& __c : _M_chunks)
	if (__c._M_p)
	  __r->deallocate(__c._M_p, __c._M_bytes, __alignment);
      _M_chunks.clear(__r);
    }

    // A "resourceless vector" instead of pmr::vector, to save space.
    // All resize operations need to be passed a memory resource, which
    // obviously needs to be the same one every time.
    // Chunks are kept sorted by address of their first block, except for
    // the most recently-allocated Chunk which is at the end of the vector.
    struct vector
    {
      using value_type = chunk;
      using size_type = unsigned;
      using iterator = value_type*;

      // A vector owns its data pointer but not memory held by its elements.
      chunk* data = nullptr;
      size_type size = 0;
      size_type capacity = 0;

      vector() = default;

      vector(size_type __n, memory_resource* __r)
      : data(polymorphic_allocator<value_type>(__r).allocate(__n)),
	capacity(__n)
      { }

      // Must call clear(r) before destruction!
      ~vector() { __glibcxx_assert(data == nullptr); }

      vector(vector&& __rval) noexcept
	: data(__rval.data), size(__rval.size), capacity(__rval.capacity)
      {
	__rval.data = nullptr;
	__rval.capacity = __rval.size = 0;
      }

      vector& operator=(vector&& __rval) noexcept
      {
	__glibcxx_assert(data == nullptr);
	data = __rval.data;
	size = __rval.size;
	capacity = __rval.capacity;
	__rval.data = nullptr;
	__rval.capacity = __rval.size = 0;
	return *this;
      }

      // void resize(size_type __n, memory_resource* __r);
      // void reserve(size_type __n, memory_resource* __r);

      void clear(memory_resource* __r)
      {
	if (!data)
	  return;
	// Chunks must be individually freed before clearing the vector.
	std::destroy(begin(), end());
	polymorphic_allocator<value_type>(__r).deallocate(data, capacity);
	data = nullptr;
	capacity = size = 0;
      }

      // Sort existing elements then insert new one at the end.
      iterator insert(chunk&& c, memory_resource* r)
      {
	if (size < capacity)
	  {
	    if (size > 1)
	      {
		auto mid = end() - 1;
		std::rotate(std::lower_bound(begin(), mid, *mid), mid, end());
	      }
	  }
	else if (size > 0)
	  {
	    polymorphic_allocator<value_type> __alloc(r);
	    auto __mid = std::lower_bound(begin(), end() - 1, back());
	    auto __p = __alloc.allocate(capacity * 1.5);
	    // move [begin,__mid) to new storage
	    auto __p2 = std::move(begin(), __mid, __p);
	    // move end-1 to new storage
	    *__p2 = std::move(back());
	    // move [__mid,end-1) to new storage
	    std::move(__mid, end() - 1, ++__p2);
	    std::destroy(begin(), end());
	    __alloc.deallocate(data, capacity);
	    data = __p;
	    capacity *= 1.5;
	  }
	else
	  {
	    polymorphic_allocator<value_type> __alloc(r);
	    data = __alloc.allocate(capacity = 8);
	  }
	auto back = ::new (data + size) chunk(std::move(c));
	__glibcxx_assert(std::is_sorted(begin(), back));
	++size;
	return back;
      }

      iterator begin() const { return data; }
      iterator end() const { return data + size; }

      bool empty() const noexcept { return size == 0; }

      value_type& back() { return data[size - 1]; }
    };
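
    // Ordering invariant, by example (exposition only): after inserting
    // chunks at addresses A < C and then B, the vector holds [A, C, B];
    // everything except the newest element is sorted by address.
    // _Pool::deallocate() relies on this: it checks the newest chunk first,
    // then binary-searches the sorted prefix with std::upper_bound to find
    // the chunk that owns the block being returned.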

    vector _M_chunks;
    unsigned _M_block_sz; 	// size of blocks allocated from this pool
    unsigned _M_blocks_per_chunk;	// number of blocks to allocate next
  };

  // An oversized allocation that doesn't fit in a pool.
  struct __pool_resource::_BigBlock : big_block
  {
    using big_block::big_block;
  };

  namespace {

  constexpr size_t pool_sizes[] = {
      8, 16, 24,
      32, 48,
      64, 80, 96, 112,
      128, 192,
      256, 320, 384, 448,
      512, 768,
#if __SIZE_WIDTH__ > 16
      1024, 1536,
      2048, 3072,
#if __SIZE_WIDTH__ > 20
      1<<12, 1<<13, 1<<14,
      1<<15, 1<<16, 1<<17,
      1<<20, 1<<21, 1<<22 // 4MB should be enough for anybody
#endif
#endif
  };

  pool_options
  munge_options(pool_options opts)
  {
    // The values in the returned struct may differ from those supplied
    // to the pool resource constructor in that values of zero will be
    // replaced with implementation-defined defaults, and sizes may be
    // rounded to unspecified granularity.

    // max_blocks_per_chunk sets the absolute maximum for the pool resource.
    // Each pool might have a smaller maximum, because pools for very large
    // objects might impose a smaller limit.
    if (opts.max_blocks_per_chunk == 0)
      {
	// Pick a default that depends on the number of bits in size_t.
	opts.max_blocks_per_chunk = __SIZE_WIDTH__ << 8;
      }
    else
      {
	// TODO round to preferred granularity ?
      }

    if (opts.max_blocks_per_chunk > chunk::max_blocks_per_chunk())
      {
	opts.max_blocks_per_chunk = chunk::max_blocks_per_chunk();
      }

    // largest_required_pool_block specifies the largest block size that will
    // be allocated from a pool. Larger allocations will come directly from
    // the upstream resource and so will not be pooled.
    if (opts.largest_required_pool_block == 0)
      {
	// Pick a sensible default that depends on the number of bits in size_t
	// (pools with larger block sizes must be explicitly requested by
	// using a non-zero value for largest_required_pool_block).
	opts.largest_required_pool_block = __SIZE_WIDTH__ << 6;
      }
    else
      {
	// Round to preferred granularity
	static_assert(std::__ispow2(pool_sizes[0]));
	constexpr size_t mask = pool_sizes[0] - 1;
	opts.largest_required_pool_block += mask;
	opts.largest_required_pool_block &= ~mask;
      }

    if (opts.largest_required_pool_block < big_block::min)
      {
	opts.largest_required_pool_block = big_block::min;
      }
    else if (opts.largest_required_pool_block > std::end(pool_sizes)[-1])
      {
	// Setting _M_opts to the largest pool allows users to query it:
	opts.largest_required_pool_block = std::end(pool_sizes)[-1];
      }
    return opts;
  }

  inline int
  pool_index(size_t block_size, int npools)
  {
    auto p = std::lower_bound(pool_sizes, pool_sizes + npools, block_size);
    int n = p - pool_sizes;
    if (n != npools)
      return n;
    return -1;
  }

  inline int
  select_num_pools(const pool_options& opts)
  {
    auto p = std::lower_bound(std::begin(pool_sizes), std::end(pool_sizes),
			      opts.largest_required_pool_block);
    const int n = p - std::begin(pool_sizes);
    if (p == std::end(pool_sizes))
      return n;
    return n + 1;
  }
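
  // Worked example (exposition only): on a typical 64-bit target an
  // all-zero pool_options becomes max_blocks_per_chunk == 64 << 8 == 16384
  // and largest_required_pool_block == 64 << 6 == 4096.  select_num_pools()
  // then finds 4096 in pool_sizes and returns 22, giving one pool for each
  // size from 8 up to 4096; larger requests bypass the pools entirely.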

#ifdef _GLIBCXX_HAS_GTHREADS
  using shared_lock = std::shared_lock<shared_mutex>;
  using exclusive_lock = lock_guard<shared_mutex>;
#endif

  } // namespace

  __pool_resource::
  __pool_resource(const pool_options& opts, memory_resource* upstream)
  : _M_opts(munge_options(opts)), _M_unpooled(upstream),
    _M_npools(select_num_pools(_M_opts))
  { }

  __pool_resource::~__pool_resource() { release(); }

  void
  __pool_resource::release() noexcept
  {
    memory_resource* res = resource();
    // deallocate oversize allocations
    for (auto& b : _M_unpooled)
      res->deallocate(b.pointer, b.size(), b.align());
    pmr::vector<_BigBlock>{res}.swap(_M_unpooled);
  }

  void*
  __pool_resource::allocate(size_t bytes, size_t alignment)
  {
    auto& b = _M_unpooled.emplace_back(bytes, alignment);
    __try {
      // N.B. need to allocate b.size(), which might be larger than bytes.
      void* p = resource()->allocate(b.size(), alignment);
      b.pointer = p;
      if (_M_unpooled.size() > 1)
	{
	  const auto mid = _M_unpooled.end() - 1;
	  // move to right position in vector
	  std::rotate(std::lower_bound(_M_unpooled.begin(), mid, p),
		      mid, _M_unpooled.end());
	}
      return p;
    } __catch(...) {
      _M_unpooled.pop_back();
      __throw_exception_again;
    }
  }

  void
  __pool_resource::deallocate(void* p, size_t bytes [[maybe_unused]],
			      size_t alignment [[maybe_unused]])
  {
    const auto it
      = std::lower_bound(_M_unpooled.begin(), _M_unpooled.end(), p);
    __glibcxx_assert(it != _M_unpooled.end() && it->pointer == p);
    if (it != _M_unpooled.end() && it->pointer == p) // [[likely]]
      {
	const auto b = *it;
	__glibcxx_assert(b.size() == b.alloc_size(bytes));
	__glibcxx_assert(b.align() == alignment);
	_M_unpooled.erase(it);
	// N.B. need to deallocate b.size(), which might be larger than bytes.
	resource()->deallocate(p, b.size(), b.align());
      }
  }

  // Create array of pools, allocated from upstream resource.
  auto
  __pool_resource::_M_alloc_pools()
  -> _Pool*
  {
    polymorphic_allocator<_Pool> alloc{resource()};
    _Pool* p = alloc.allocate(_M_npools);
    for (int i = 0; i < _M_npools; ++i)
      {
	// For last pool use largest_required_pool_block
	const size_t block_size = (i+1 == _M_npools)
	  ? _M_opts.largest_required_pool_block
	  : pool_sizes[i];

	// Decide on initial number of blocks per chunk.
	// Always have at least 16 blocks per chunk:
	const size_t min_blocks_per_chunk = 16;
	// But for smaller blocks, use a larger initial size:
	size_t blocks_per_chunk
	  = std::max(1024 / block_size, min_blocks_per_chunk);
	// But don't exceed the requested max_blocks_per_chunk:
	blocks_per_chunk
	  = std::min(blocks_per_chunk, _M_opts.max_blocks_per_chunk);
	// Allow space for bitset to track which blocks are used/unused:
	blocks_per_chunk *= 1 - 1.0 / (__CHAR_BIT__ * block_size);
	// Construct a _Pool for the given block size and initial chunk size:
	alloc.construct(p + i, block_size, blocks_per_chunk);
      }
    return p;
  }
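
  // Worked example (exposition only): with the default options, the 8-byte
  // pool starts with max(1024 / 8, 16) == 128 blocks per chunk, scaled by
  // (1 - 1/64) to leave room for the bitset, giving 126 blocks.  The
  // 4096-byte pool starts with 16 blocks (scaled to 15).  Each pool then
  // doubles its chunk size on every replenish(), as shown above.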

#ifdef _GLIBCXX_HAS_GTHREADS
  // synchronized_pool_resource members.

  /* Notes on implementation and thread safety:
   *
   * Each synchronized_pool_resource manages a linked list of N+1 _TPools
   * objects, where N is the number of threads using the pool resource.
   * Each _TPools object has its own set of pools, with their own chunks.
   * The first element of the list, _M_tpools[0], can be used by any thread.
   * The rest of the list contains a _TPools object for each thread,
   * accessed via the thread-specific key _M_key (and referred to for
   * exposition as _M_tpools[_M_key]).
   * The first element, _M_tpools[0], contains "orphaned chunks" which were
   * allocated by a thread which has since exited, and so there is no
   * _M_tpools[_M_key] for that thread.
   * A thread can access its own thread-specific set of pools via _M_key
   * while holding a shared lock on _M_mx. Accessing _M_impl._M_unpooled
   * or _M_tpools[0] or any other thread's _M_tpools[_M_key] requires an
   * exclusive lock.
   * The upstream_resource() pointer can be obtained without a lock, but
   * any dereference of that pointer requires an exclusive lock.
   * The _M_impl._M_opts and _M_impl._M_npools members are immutable,
   * and can safely be accessed concurrently.
   */

  extern "C" {
    static void destroy_TPools(void*);
  }

  struct synchronized_pool_resource::_TPools
  {
    // Exclusive lock must be held in the thread where this constructor runs.
    explicit
    _TPools(synchronized_pool_resource& owner, exclusive_lock&)
    : owner(owner), pools(owner._M_impl._M_alloc_pools())
    {
      // __builtin_printf("%p constructing\n", this);
      __glibcxx_assert(pools);
    }

    // Exclusive lock must be held in the thread where this destructor runs.
    ~_TPools()
    {
      __glibcxx_assert(pools);
      if (pools)
	{
	  memory_resource* r = owner.upstream_resource();
	  for (int i = 0; i < owner._M_impl._M_npools; ++i)
	    pools[i].release(r);
	  std::destroy_n(pools, owner._M_impl._M_npools);
	  polymorphic_allocator<__pool_resource::_Pool> a(r);
	  a.deallocate(pools, owner._M_impl._M_npools);
	}
      if (prev)
	prev->next = next;
      if (next)
	next->prev = prev;
    }

    // Exclusive lock must be held in the thread where this function runs.
    void move_nonempty_chunks()
    {
      __glibcxx_assert(pools);
      if (!pools)
	return;
      memory_resource* r = owner.upstream_resource();
      // move all non-empty chunks to the shared _TPools
      for (int i = 0; i < owner._M_impl._M_npools; ++i)
	for (auto& c : pools[i]._M_chunks)
	  if (!c.empty())
	    owner._M_tpools->pools[i]._M_chunks.insert(std::move(c), r);
    }

    synchronized_pool_resource& owner;
    __pool_resource::_Pool* pools = nullptr;
    _TPools* prev = nullptr;
    _TPools* next = nullptr;

    static void destroy(_TPools* p)
    {
      exclusive_lock l(p->owner._M_mx);
      // __glibcxx_assert(p != p->owner._M_tpools);
      p->move_nonempty_chunks();
      polymorphic_allocator<_TPools> a(p->owner.upstream_resource());
      p->~_TPools();
      a.deallocate(p, 1);
    }
  };

  // Called when a thread exits
  extern "C" {
    static void destroy_TPools(void* p)
    {
      using _TPools = synchronized_pool_resource::_TPools;
      _TPools::destroy(static_cast<_TPools*>(p));
    }
  }
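
  // Lifecycle sketch (exposition only): the first time a thread allocates,
  // do_allocate() calls _M_alloc_tpools(), which registers the new _TPools
  // with __gthread_setspecific.  When that thread exits, destroy_TPools()
  // runs, takes the exclusive lock, moves any non-empty chunks into the
  // shared _M_tpools[0], and returns the _TPools storage to the upstream
  // resource.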

  // Constructor
  synchronized_pool_resource::
  synchronized_pool_resource(const pool_options& opts,
			     memory_resource* upstream)
  : _M_impl(opts, upstream)
  {
    if (int err = __gthread_key_create(&_M_key, destroy_TPools))
      __throw_system_error(err);
    exclusive_lock l(_M_mx);
    _M_tpools = _M_alloc_shared_tpools(l);
  }

  // Destructor
  synchronized_pool_resource::~synchronized_pool_resource()
  {
    release();
    __gthread_key_delete(_M_key); // does not run destroy_TPools
  }

  void
  synchronized_pool_resource::release()
  {
    exclusive_lock l(_M_mx);
    if (_M_tpools)
      {
	__gthread_key_delete(_M_key); // does not run destroy_TPools
	__gthread_key_create(&_M_key, destroy_TPools);
	polymorphic_allocator<_TPools> a(upstream_resource());
	// destroy+deallocate each _TPools
	do
	  {
	    _TPools* p = _M_tpools;
	    _M_tpools = _M_tpools->next;
	    p->~_TPools();
	    a.deallocate(p, 1);
	  }
	while (_M_tpools);
      }
    // release unpooled memory
    _M_impl.release();
  }

  // Caller must hold shared or exclusive lock to ensure the pointer
  // isn't invalidated before it can be used.
  auto
  synchronized_pool_resource::_M_thread_specific_pools() noexcept
  {
    __pool_resource::_Pool* pools = nullptr;
    if (auto tp = static_cast<_TPools*>(__gthread_getspecific(_M_key)))
      {
	pools = tp->pools;
	__glibcxx_assert(tp->pools);
      }
    return pools;
  }

  // Override for memory_resource::do_allocate
  void*
  synchronized_pool_resource::
  do_allocate(size_t bytes, size_t alignment)
  {
    const auto block_size = std::max(bytes, alignment);
    if (block_size <= _M_impl._M_opts.largest_required_pool_block)
      {
	const ptrdiff_t index = pool_index(block_size, _M_impl._M_npools);
	memory_resource* const r = upstream_resource();
	const pool_options opts = _M_impl._M_opts;
	{
	  // Try to allocate from the thread-specific pool
	  shared_lock l(_M_mx);
	  if (auto pools = _M_thread_specific_pools()) // [[likely]]
	    {
	      // Need exclusive lock to replenish so use try_allocate:
	      if (void* p = pools[index].try_allocate())
		return p;
	      // Need to take exclusive lock and replenish pool.
	    }
	  // Need to allocate or replenish thread-specific pools using
	  // upstream resource, so need to hold exclusive lock.
	}
	// N.B. Another thread could call release() now lock is not held.
	exclusive_lock excl(_M_mx);
	if (!_M_tpools) // [[unlikely]]
	  _M_tpools = _M_alloc_shared_tpools(excl);
	auto pools = _M_thread_specific_pools();
	if (!pools)
	  pools = _M_alloc_tpools(excl)->pools;
	return pools[index].allocate(r, opts);
      }
    exclusive_lock l(_M_mx);
    return _M_impl.allocate(bytes, alignment); // unpooled allocation
  }

  // Override for memory_resource::do_deallocate
  void
  synchronized_pool_resource::
  do_deallocate(void* p, size_t bytes, size_t alignment)
  {
    size_t block_size = std::max(bytes, alignment);
    if (block_size <= _M_impl._M_opts.largest_required_pool_block)
      {
	const ptrdiff_t index = pool_index(block_size, _M_impl._M_npools);
	__glibcxx_assert(index != -1);
	{
	  shared_lock l(_M_mx);
	  auto pools = _M_thread_specific_pools();
	  if (pools)
	    {
	      // No need to lock here, no other thread is accessing this pool.
	      if (pools[index].deallocate(upstream_resource(), p))
		return;
	    }
	  // Block might have come from a different thread's pool,
	  // take exclusive lock and check every pool.
	}
	// TODO store {p, bytes, alignment} somewhere and defer returning
	// the block to the correct thread-specific pool until we next
	// take the exclusive lock.
	exclusive_lock excl(_M_mx);
	for (_TPools* t = _M_tpools; t != nullptr; t = t->next)
	  {
	    if (t->pools) // [[likely]]
	      {
		if (t->pools[index].deallocate(upstream_resource(), p))
		  return;
	      }
	  }
      }
    exclusive_lock l(_M_mx);
    _M_impl.deallocate(p, bytes, alignment);
  }

  // Allocate a thread-specific _TPools object and add it to the linked list.
  auto
  synchronized_pool_resource::_M_alloc_tpools(exclusive_lock& l)
  -> _TPools*
  {
    __glibcxx_assert(_M_tpools != nullptr);
    // dump_list(_M_tpools);
    polymorphic_allocator<_TPools> a(upstream_resource());
    _TPools* p = a.allocate(1);
    bool constructed = false;
    __try
      {
	a.construct(p, *this, l);
	constructed = true;
	// __glibcxx_assert(__gthread_getspecific(_M_key) == nullptr);
	if (int err = __gthread_setspecific(_M_key, p))
	  __throw_system_error(err);
      }
    __catch(...)
      {
	if (constructed)
	  a.destroy(p);
	a.deallocate(p, 1);
	__throw_exception_again;
      }
    p->prev = _M_tpools;
    p->next = _M_tpools->next;
    _M_tpools->next = p;
    if (p->next)
      p->next->prev = p;
    return p;
  }

  // Allocate the shared _TPools object, _M_tpools[0]
  auto
  synchronized_pool_resource::_M_alloc_shared_tpools(exclusive_lock& l)
  -> _TPools*
  {
    __glibcxx_assert(_M_tpools == nullptr);
    polymorphic_allocator<_TPools> a(upstream_resource());
    _TPools* p = a.allocate(1);
    __try
      {
	a.construct(p, *this, l);
      }
    __catch(...)
      {
	a.deallocate(p, 1);
	__throw_exception_again;
      }
    // __glibcxx_assert(p->next == nullptr);
    // __glibcxx_assert(p->prev == nullptr);
    return p;
  }
#endif // _GLIBCXX_HAS_GTHREADS

  // unsynchronized_pool_resource member functions

  // Constructor
  unsynchronized_pool_resource::
  unsynchronized_pool_resource(const pool_options& opts,
			       memory_resource* upstream)
  : _M_impl(opts, upstream), _M_pools(_M_impl._M_alloc_pools())
  { }

  // Destructor
  unsynchronized_pool_resource::~unsynchronized_pool_resource()
  { release(); }

  // Return all memory to upstream resource.
  void
  unsynchronized_pool_resource::release()
  {
    // release pooled memory
    if (_M_pools)
      {
	memory_resource* res = upstream_resource();
	polymorphic_allocator<_Pool> alloc{res};
	for (int i = 0; i < _M_impl._M_npools; ++i)
	  {
	    _M_pools[i].release(res);
	    alloc.destroy(_M_pools + i);
	  }
	alloc.deallocate(_M_pools, _M_impl._M_npools);
	_M_pools = nullptr;
      }

    // release unpooled memory
    _M_impl.release();
  }

  // Find the right pool for a block of size block_size.
  auto
  unsynchronized_pool_resource::_M_find_pool(size_t block_size) noexcept
  {
    __pool_resource::_Pool* pool = nullptr;
    if (_M_pools) // [[likely]]
      {
	int index = pool_index(block_size, _M_impl._M_npools);
	if (index != -1)
	  pool = _M_pools + index;
      }
    return pool;
  }

  // Override for memory_resource::do_allocate
  void*
  unsynchronized_pool_resource::do_allocate(size_t bytes, size_t alignment)
  {
    const auto block_size = std::max(bytes, alignment);
    if (block_size <= _M_impl._M_opts.largest_required_pool_block)
      {
	// Recreate pools if release() has been called:
	if (__builtin_expect(_M_pools == nullptr, false))
	  _M_pools = _M_impl._M_alloc_pools();
	if (auto pool = _M_find_pool(block_size))
	  return pool->allocate(upstream_resource(), _M_impl._M_opts);
      }
    return _M_impl.allocate(bytes, alignment);
  }

  // Override for memory_resource::do_deallocate
  void
  unsynchronized_pool_resource::
  do_deallocate(void* p, size_t bytes, size_t alignment)
  {
    size_t block_size = std::max(bytes, alignment);
    if (block_size <= _M_impl._M_opts.largest_required_pool_block)
      {
	if (auto pool = _M_find_pool(block_size))
	  {
	    pool->deallocate(upstream_resource(), p);
	    return;
	  }
      }
    _M_impl.deallocate(p, bytes, alignment);
  }

} // namespace pmr
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std