1 // hashtable.h header -*- C++ -*-
2
3 // Copyright (C) 2007-2022 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24
25 /** @file bits/hashtable.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{unordered_map, unordered_set}
28 */
29
30 #ifndef _HASHTABLE_H
31 #define _HASHTABLE_H 1
32
33 #pragma GCC system_header
34
35 #include <bits/hashtable_policy.h>
36 #include <bits/enable_special_members.h>
37 #if __cplusplus > 201402L
38 # include <bits/node_handle.h>
39 #endif
40 #include <bits/functional_hash.h>
41 #include <bits/stl_function.h> // equal_to, _Identity, _Select1st
42
namespace std _GLIBCXX_VISIBILITY(default)
44 {
45 _GLIBCXX_BEGIN_NAMESPACE_VERSION
46 /// @cond undocumented
47
  // Trait that decides the default for caching hash codes in nodes:
  // cache unless the hash functor is both fast (__is_fast_hash) and
  // nothrow invocable.  A non-throwing hasher is mandatory when codes
  // are not cached, because erase() must be able to recompute them.
  template<typename _Tp, typename _Hash>
    using __cache_default
      = __not_<__and_<// Do not cache for fast hasher.
		      __is_fast_hash<_Hash>,
		      // Mandatory to have erase not throwing.
		      __is_nothrow_invocable<const _Hash&, const _Tp&>>>;
54
  // Helper to conditionally delete the default constructor: the
  // hashtable is default constructible only if _Equal, _Hash and
  // _Allocator all are.
  // The _Hash_node_base type is used to distinguish this specialization
  // from any other potentially-overlapping subobjects of the hashtable.
  template<typename _Equal, typename _Hash, typename _Allocator>
    using _Hashtable_enable_default_ctor
      = _Enable_default_constructor<__and_<is_default_constructible<_Equal>,
				       is_default_constructible<_Hash>,
				       is_default_constructible<_Allocator>>{},
				    __detail::_Hash_node_base>;
64
65 /**
66 * Primary class template _Hashtable.
67 *
68 * @ingroup hashtable-detail
69 *
70 * @tparam _Value CopyConstructible type.
71 *
72 * @tparam _Key CopyConstructible type.
73 *
74 * @tparam _Alloc An allocator type
75 * ([lib.allocator.requirements]) whose _Alloc::value_type is
76 * _Value. As a conforming extension, we allow for
77 * _Alloc::value_type != _Value.
78 *
79 * @tparam _ExtractKey Function object that takes an object of type
80 * _Value and returns a value of type _Key.
81 *
82 * @tparam _Equal Function object that takes two objects of type k
83 * and returns a bool-like value that is true if the two objects
84 * are considered equal.
85 *
86 * @tparam _Hash The hash function. A unary function object with
87 * argument type _Key and result type size_t. Return values should
*  be distributed over the entire range [0, numeric_limits<size_t>::max()].
89 *
90 * @tparam _RangeHash The range-hashing function (in the terminology of
91 * Tavori and Dreizin). A binary function object whose argument
92 * types and result type are all size_t. Given arguments r and N,
93 * the return value is in the range [0, N).
94 *
95 * @tparam _Unused Not used.
96 *
97 * @tparam _RehashPolicy Policy class with three members, all of
98 * which govern the bucket count. _M_next_bkt(n) returns a bucket
99 * count no smaller than n. _M_bkt_for_elements(n) returns a
100 * bucket count appropriate for an element count of n.
101 * _M_need_rehash(n_bkt, n_elt, n_ins) determines whether, if the
102 * current bucket count is n_bkt and the current element count is
103 * n_elt, we need to increase the bucket count for n_ins insertions.
104 * If so, returns make_pair(true, n), where n is the new bucket count. If
105 * not, returns make_pair(false, <anything>)
106 *
107 * @tparam _Traits Compile-time class with three boolean
108 * std::integral_constant members: __cache_hash_code, __constant_iterators,
109 * __unique_keys.
110 *
111 * Each _Hashtable data structure has:
112 *
113 * - _Bucket[] _M_buckets
114 * - _Hash_node_base _M_before_begin
115 * - size_type _M_bucket_count
116 * - size_type _M_element_count
117 *
118 * with _Bucket being _Hash_node_base* and _Hash_node containing:
119 *
120 * - _Hash_node* _M_next
121 * - Tp _M_value
122 * - size_t _M_hash_code if cache_hash_code is true
123 *
124 * In terms of Standard containers the hashtable is like the aggregation of:
125 *
126 * - std::forward_list<_Node> containing the elements
127 * - std::vector<std::forward_list<_Node>::iterator> representing the buckets
128 *
129 * The non-empty buckets contain the node before the first node in the
130 * bucket. This design makes it possible to implement something like a
131 * std::forward_list::insert_after on container insertion and
132 * std::forward_list::erase_after on container erase
133 * calls. _M_before_begin is equivalent to
134 * std::forward_list::before_begin. Empty buckets contain
135 * nullptr. Note that one of the non-empty buckets contains
136 * &_M_before_begin which is not a dereferenceable node so the
137 * node pointer in a bucket shall never be dereferenced, only its
138 * next node can be.
139 *
140 * Walking through a bucket's nodes requires a check on the hash code to
141 * see if each node is still in the bucket. Such a design assumes a
142 * quite efficient hash functor and is one of the reasons it is
143 * highly advisable to set __cache_hash_code to true.
144 *
145 * The container iterators are simply built from nodes. This way
146 * incrementing the iterator is perfectly efficient independent of
147 * how many empty buckets there are in the container.
148 *
149 * On insert we compute the element's hash code and use it to find the
150 * bucket index. If the element must be inserted in an empty bucket
151 * we add it at the beginning of the singly linked list and make the
152 * bucket point to _M_before_begin. The bucket that used to point to
153 * _M_before_begin, if any, is updated to point to its new before
154 * begin node.
155 *
156 * On erase, the simple iterator design requires using the hash
157 * functor to get the index of the bucket to update. For this
158 * reason, when __cache_hash_code is set to false the hash functor must
159 * not throw and this is enforced by a static assertion.
160 *
161 * Functionality is implemented by decomposition into base classes,
162 * where the derived _Hashtable class is used in _Map_base,
163 * _Insert, _Rehash_base, and _Equality base classes to access the
164 * "this" pointer. _Hashtable_base is used in the base classes as a
165 * non-recursive, fully-completed-type so that detailed nested type
166 * information, such as iterator type and node type, can be
167 * used. This is similar to the "Curiously Recurring Template
168 * Pattern" (CRTP) technique, but uses a reconstructed, not
169 * explicitly passed, template pattern.
170 *
171 * Base class templates are:
172 * - __detail::_Hashtable_base
173 * - __detail::_Map_base
174 * - __detail::_Insert
175 * - __detail::_Rehash_base
176 * - __detail::_Equality
177 */
178 template<typename _Key, typename _Value, typename _Alloc,
179 typename _ExtractKey, typename _Equal,
180 typename _Hash, typename _RangeHash, typename _Unused,
181 typename _RehashPolicy, typename _Traits>
182 class _Hashtable
183 : public __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
184 _Hash, _RangeHash, _Unused, _Traits>,
185 public __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
186 _Hash, _RangeHash, _Unused,
187 _RehashPolicy, _Traits>,
188 public __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey, _Equal,
189 _Hash, _RangeHash, _Unused,
190 _RehashPolicy, _Traits>,
191 public __detail::_Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
192 _Hash, _RangeHash, _Unused,
193 _RehashPolicy, _Traits>,
194 public __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey, _Equal,
195 _Hash, _RangeHash, _Unused,
196 _RehashPolicy, _Traits>,
197 private __detail::_Hashtable_alloc<
198 __alloc_rebind<_Alloc,
199 __detail::_Hash_node<_Value,
200 _Traits::__hash_cached::value>>>,
201 private _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>
202 {
203 static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
204 "unordered container must have a non-const, non-volatile value_type");
205 #if __cplusplus > 201703L || defined __STRICT_ANSI__
206 static_assert(is_same<typename _Alloc::value_type, _Value>{},
207 "unordered container must have the same value_type as its allocator");
208 #endif
209
210 using __traits_type = _Traits;
211 using __hash_cached = typename __traits_type::__hash_cached;
212 using __constant_iterators = typename __traits_type::__constant_iterators;
213 using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
214 using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
215
216 using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;
217
218 using __node_value_type =
219 __detail::_Hash_node_value<_Value, __hash_cached::value>;
220 using __node_ptr = typename __hashtable_alloc::__node_ptr;
221 using __value_alloc_traits =
222 typename __hashtable_alloc::__value_alloc_traits;
223 using __node_alloc_traits =
224 typename __hashtable_alloc::__node_alloc_traits;
225 using __node_base = typename __hashtable_alloc::__node_base;
226 using __node_base_ptr = typename __hashtable_alloc::__node_base_ptr;
227 using __buckets_ptr = typename __hashtable_alloc::__buckets_ptr;
228
229 using __insert_base = __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey,
230 _Equal, _Hash,
231 _RangeHash, _Unused,
232 _RehashPolicy, _Traits>;
233 using __enable_default_ctor
234 = _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>;
235
236 public:
237 typedef _Key key_type;
238 typedef _Value value_type;
239 typedef _Alloc allocator_type;
240 typedef _Equal key_equal;
241
242 // mapped_type, if present, comes from _Map_base.
243 // hasher, if present, comes from _Hash_code_base/_Hashtable_base.
244 typedef typename __value_alloc_traits::pointer pointer;
245 typedef typename __value_alloc_traits::const_pointer const_pointer;
246 typedef value_type& reference;
247 typedef const value_type& const_reference;
248
249 using iterator = typename __insert_base::iterator;
250
251 using const_iterator = typename __insert_base::const_iterator;
252
253 using local_iterator = __detail::_Local_iterator<key_type, _Value,
254 _ExtractKey, _Hash, _RangeHash, _Unused,
255 __constant_iterators::value,
256 __hash_cached::value>;
257
258 using const_local_iterator = __detail::_Local_const_iterator<
259 key_type, _Value,
260 _ExtractKey, _Hash, _RangeHash, _Unused,
261 __constant_iterators::value, __hash_cached::value>;
262
263 private:
264 using __rehash_type = _RehashPolicy;
265 using __rehash_state = typename __rehash_type::_State;
266
267 using __unique_keys = typename __traits_type::__unique_keys;
268
269 using __hashtable_base = __detail::
270 _Hashtable_base<_Key, _Value, _ExtractKey,
271 _Equal, _Hash, _RangeHash, _Unused, _Traits>;
272
273 using __hash_code_base = typename __hashtable_base::__hash_code_base;
274 using __hash_code = typename __hashtable_base::__hash_code;
275 using __ireturn_type = typename __insert_base::__ireturn_type;
276
277 using __map_base = __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey,
278 _Equal, _Hash, _RangeHash, _Unused,
279 _RehashPolicy, _Traits>;
280
281 using __rehash_base = __detail::_Rehash_base<_Key, _Value, _Alloc,
282 _ExtractKey, _Equal,
283 _Hash, _RangeHash, _Unused,
284 _RehashPolicy, _Traits>;
285
286 using __eq_base = __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey,
287 _Equal, _Hash, _RangeHash, _Unused,
288 _RehashPolicy, _Traits>;
289
290 using __reuse_or_alloc_node_gen_t =
291 __detail::_ReuseOrAllocNode<__node_alloc_type>;
292 using __alloc_node_gen_t =
293 __detail::_AllocNode<__node_alloc_type>;
294 using __node_builder_t =
295 __detail::_NodeBuilder<_ExtractKey>;
296
297 // Simple RAII type for managing a node containing an element
298 struct _Scoped_node
299 {
300 // Take ownership of a node with a constructed element.
301 _Scoped_node(__node_ptr __n, __hashtable_alloc* __h)
302 : _M_h(__h), _M_node(__n) { }
303
304 // Allocate a node and construct an element within it.
305 template<typename... _Args>
306 _Scoped_node(__hashtable_alloc* __h, _Args&&... __args)
307 : _M_h(__h),
308 _M_node(__h->_M_allocate_node(std::forward<_Args>(__args)...))
309 { }
310
311 // Destroy element and deallocate node.
312 ~_Scoped_node() { if (_M_node) _M_h->_M_deallocate_node(_M_node); };
313
314 _Scoped_node(const _Scoped_node&) = delete;
315 _Scoped_node& operator=(const _Scoped_node&) = delete;
316
317 __hashtable_alloc* _M_h;
318 __node_ptr _M_node;
319 };
320
      // Cast an element for transfer from another hashtable: when _Ht is
      // an lvalue reference the result binds as const value_type& (element
      // is copied); when _Ht is an rvalue the result is value_type&&
      // (element is moved).
      template<typename _Ht>
	static constexpr
	__conditional_t<std::is_lvalue_reference<_Ht>::value,
			const value_type&, value_type&&>
	__fwd_value_for(value_type& __val) noexcept
	{ return std::move(__val); }
327
      // Compile-time diagnostics.

      // _Hash_code_base has everything protected, so use this derived type to
      // access it.
      struct __hash_code_base_access : __hash_code_base
      { using __hash_code_base::_M_bucket_index; };

      // To get bucket index we need _RangeHash not to throw.
      static_assert(is_nothrow_default_constructible<_RangeHash>::value,
		    "Functor used to map hash code to bucket index"
		    " must be nothrow default constructible");
      static_assert(noexcept(
	std::declval<const _RangeHash&>()((std::size_t)0, (std::size_t)0)),
		    "Functor used to map hash code to bucket index must be"
		    " noexcept");

      // To compute bucket index we also need _ExtractKey not to throw.
      static_assert(is_nothrow_default_constructible<_ExtractKey>::value,
		    "_ExtractKey must be nothrow default constructible");
      static_assert(noexcept(
	std::declval<const _ExtractKey&>()(std::declval<_Value>())),
		    "_ExtractKey functor must be noexcept invocable");
350
351 template<typename _Keya, typename _Valuea, typename _Alloca,
352 typename _ExtractKeya, typename _Equala,
353 typename _Hasha, typename _RangeHasha, typename _Unuseda,
354 typename _RehashPolicya, typename _Traitsa,
355 bool _Unique_keysa>
356 friend struct __detail::_Map_base;
357
358 template<typename _Keya, typename _Valuea, typename _Alloca,
359 typename _ExtractKeya, typename _Equala,
360 typename _Hasha, typename _RangeHasha, typename _Unuseda,
361 typename _RehashPolicya, typename _Traitsa>
362 friend struct __detail::_Insert_base;
363
364 template<typename _Keya, typename _Valuea, typename _Alloca,
365 typename _ExtractKeya, typename _Equala,
366 typename _Hasha, typename _RangeHasha, typename _Unuseda,
367 typename _RehashPolicya, typename _Traitsa,
368 bool _Constant_iteratorsa>
369 friend struct __detail::_Insert;
370
371 template<typename _Keya, typename _Valuea, typename _Alloca,
372 typename _ExtractKeya, typename _Equala,
373 typename _Hasha, typename _RangeHasha, typename _Unuseda,
374 typename _RehashPolicya, typename _Traitsa,
375 bool _Unique_keysa>
376 friend struct __detail::_Equality;
377
378 public:
379 using size_type = typename __hashtable_base::size_type;
380 using difference_type = typename __hashtable_base::difference_type;
381
382 #if __cplusplus > 201402L
383 using node_type = _Node_handle<_Key, _Value, __node_alloc_type>;
384 using insert_return_type = _Node_insert_return<iterator, node_type>;
385 #endif
386
    private:
      __buckets_ptr		_M_buckets	= &_M_single_bucket; // bucket array
      size_type			_M_bucket_count	= 1;	// number of buckets
      __node_base		_M_before_begin;	// node before the first element
      size_type			_M_element_count = 0;	// number of elements
      _RehashPolicy		_M_rehash_policy;	// controls bucket-count growth

      // A single bucket used when only need for 1 bucket. Especially
      // interesting in move semantic to leave hashtable with only 1 bucket
      // which is not allocated so that we can have those operations noexcept
      // qualified.
      // Note that we can't leave hashtable with 0 bucket without adding
      // numerous checks in the code to avoid 0 modulus.
      __node_base_ptr		_M_single_bucket = nullptr;
401
      // Restore the invariant that the bucket holding the first element
      // points at _M_before_begin (see class comment).
      void
      _M_update_bbegin()
      {
	if (_M_begin())
	  _M_buckets[_M_bucket_index(*_M_begin())] = &_M_before_begin;
      }

      // Make __n the first element and update its bucket accordingly.
      void
      _M_update_bbegin(__node_ptr __n)
      {
	_M_before_begin._M_nxt = __n;
	_M_update_bbegin();
      }
415
      // True if __bkts is the embedded single-bucket storage, i.e. no
      // bucket array was dynamically allocated.
      bool
      _M_uses_single_bucket(__buckets_ptr __bkts) const
      { return __builtin_expect(__bkts == &_M_single_bucket, false); }

      bool
      _M_uses_single_bucket() const
      { return _M_uses_single_bucket(_M_buckets); }
423
      // Size below which linear scanning may be preferred to hashing;
      // the value comes from _Hashtable_hash_traits for this _Hash.
      static constexpr size_t
      __small_size_threshold() noexcept
      {
	return
	  __detail::_Hashtable_hash_traits<_Hash>::__small_size_threshold();
      }
430
      // Access the allocator base-class subobject.
      __hashtable_alloc&
      _M_base_alloc() { return *this; }
433
      // Allocate a bucket array of __bkt_count buckets.  A request for a
      // single bucket is served by the embedded _M_single_bucket member,
      // avoiding any allocation (keeps some operations noexcept).
      __buckets_ptr
      _M_allocate_buckets(size_type __bkt_count)
      {
	if (__builtin_expect(__bkt_count == 1, false))
	  {
	    _M_single_bucket = nullptr;
	    return &_M_single_bucket;
	  }

	return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
      }
445
      // Deallocate a bucket array, unless it is the embedded single bucket
      // (which was never allocated).
      void
      _M_deallocate_buckets(__buckets_ptr __bkts, size_type __bkt_count)
      {
	if (_M_uses_single_bucket(__bkts))
	  return;

	__hashtable_alloc::_M_deallocate_buckets(__bkts, __bkt_count);
      }
454
      // Deallocate this container's own bucket array.
      void
      _M_deallocate_buckets()
      { _M_deallocate_buckets(_M_buckets, _M_bucket_count); }
458
      // Gets bucket begin, deals with the fact that non-empty buckets contain
      // their before begin node.
      __node_ptr
      _M_bucket_begin(size_type __bkt) const;

      // First node of the container, or null when empty: _M_before_begin
      // links directly to it.
      __node_ptr
      _M_begin() const
      { return static_cast<__node_ptr>(_M_before_begin._M_nxt); }
467
468 // Assign *this using another _Hashtable instance. Whether elements
469 // are copied or moved depends on the _Ht reference.
470 template<typename _Ht>
471 void
472 _M_assign_elements(_Ht&&);
473
474 template<typename _Ht, typename _NodeGenerator>
475 void
476 _M_assign(_Ht&&, const _NodeGenerator&);
477
478 void
479 _M_move_assign(_Hashtable&&, true_type);
480
481 void
482 _M_move_assign(_Hashtable&&, false_type);
483
484 void
485 _M_reset() noexcept;
486
      // Construct an empty hashtable from functors and allocator; target
      // of the public delegating constructors.
      _Hashtable(const _Hash& __h, const _Equal& __eq,
		 const allocator_type& __a)
      : __hashtable_base(__h, __eq),
	__hashtable_alloc(__node_alloc_type(__a)),
	__enable_default_ctor(_Enable_default_constructor_tag{})
      { }
493
      // Whether a move can be noexcept: requires _No_realloc (no bucket
      // reallocation needed, e.g. allocators always equal) plus nothrow
      // copy construction of both the _Hash and _Equal functors.
      template<bool _No_realloc = true>
	static constexpr bool
	_S_nothrow_move()
	{
#if __cplusplus <= 201402L
	  return __and_<__bool_constant<_No_realloc>,
			is_nothrow_copy_constructible<_Hash>,
			is_nothrow_copy_constructible<_Equal>>::value;
#else
	  // Falls through to `return false` unless both nested
	  // conditions hold.
	  if constexpr (_No_realloc)
	    if constexpr (is_nothrow_copy_constructible<_Hash>())
	      return is_nothrow_copy_constructible<_Equal>();
	  return false;
#endif
	}
509
510 _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
511 true_type /* alloc always equal */)
512 noexcept(_S_nothrow_move());
513
514 _Hashtable(_Hashtable&&, __node_alloc_type&&,
515 false_type /* alloc always equal */);
516
517 template<typename _InputIterator>
518 _Hashtable(_InputIterator __first, _InputIterator __last,
519 size_type __bkt_count_hint,
520 const _Hash&, const _Equal&, const allocator_type&,
521 true_type __uks);
522
523 template<typename _InputIterator>
524 _Hashtable(_InputIterator __first, _InputIterator __last,
525 size_type __bkt_count_hint,
526 const _Hash&, const _Equal&, const allocator_type&,
527 false_type __uks);
528
529 public:
530 // Constructor, destructor, assignment, swap
531 _Hashtable() = default;
532
533 _Hashtable(const _Hashtable&);
534
535 _Hashtable(const _Hashtable&, const allocator_type&);
536
537 explicit
538 _Hashtable(size_type __bkt_count_hint,
539 const _Hash& __hf = _Hash(),
540 const key_equal& __eql = key_equal(),
541 const allocator_type& __a = allocator_type());
542
      // Use delegating constructors.

      // Move constructor: steals the source's node allocator; dispatches
      // with true_type since the allocator is moved, not compared.
      _Hashtable(_Hashtable&& __ht)
      noexcept(_S_nothrow_move())
      : _Hashtable(std::move(__ht), std::move(__ht._M_node_allocator()),
		   true_type{})
      { }

      // Extended move constructor: dispatches on whether the allocator
      // type is always equal.
      _Hashtable(_Hashtable&& __ht, const allocator_type& __a)
      noexcept(_S_nothrow_move<__node_alloc_traits::_S_always_equal()>())
      : _Hashtable(std::move(__ht), __node_alloc_type(__a),
		   typename __node_alloc_traits::is_always_equal{})
      { }

      // Construct an empty container using allocator __a.
      explicit
      _Hashtable(const allocator_type& __a)
      : __hashtable_alloc(__node_alloc_type(__a)),
	__enable_default_ctor(_Enable_default_constructor_tag{})
      { }
561
      // Construct from a range; dispatches on __unique_keys.
      template<typename _InputIterator>
	_Hashtable(_InputIterator __f, _InputIterator __l,
		   size_type __bkt_count_hint = 0,
		   const _Hash& __hf = _Hash(),
		   const key_equal& __eql = key_equal(),
		   const allocator_type& __a = allocator_type())
	: _Hashtable(__f, __l, __bkt_count_hint, __hf, __eql, __a,
		     __unique_keys{})
	{ }

      // Construct from an initializer_list; delegates to the range form.
      _Hashtable(initializer_list<value_type> __l,
		 size_type __bkt_count_hint = 0,
		 const _Hash& __hf = _Hash(),
		 const key_equal& __eql = key_equal(),
		 const allocator_type& __a = allocator_type())
      : _Hashtable(__l.begin(), __l.end(), __bkt_count_hint,
		   __hf, __eql, __a, __unique_keys{})
      { }
580
581 _Hashtable&
582 operator=(const _Hashtable& __ht);
583
      // Move assignment.  Storage (buckets and nodes) can be taken over
      // when the allocator propagates on move assignment or allocators
      // always compare equal; otherwise elements are assigned one by one.
      _Hashtable&
      operator=(_Hashtable&& __ht)
      noexcept(__node_alloc_traits::_S_nothrow_move()
	       && is_nothrow_move_assignable<_Hash>::value
	       && is_nothrow_move_assignable<_Equal>::value)
      {
	constexpr bool __move_storage =
	  __node_alloc_traits::_S_propagate_on_move_assign()
	  || __node_alloc_traits::_S_always_equal();
	_M_move_assign(std::move(__ht), __bool_constant<__move_storage>());
	return *this;
      }
596
      // Assign from an initializer_list, recycling the existing nodes.
      _Hashtable&
      operator=(initializer_list<value_type> __l)
      {
	// __roan takes ownership of the current node list so the nodes can
	// be reused for the new elements (or deallocated if left over).
	__reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
	// Detach the nodes — now owned by __roan — before calling clear().
	_M_before_begin._M_nxt = nullptr;
	clear();

	// We consider that all elements of __l are going to be inserted.
	auto __l_bkt_count = _M_rehash_policy._M_bkt_for_elements(__l.size());

	// Do not shrink to keep potential user reservation.
	if (_M_bucket_count < __l_bkt_count)
	  rehash(__l_bkt_count);

	this->_M_insert_range(__l.begin(), __l.end(), __roan, __unique_keys{});
	return *this;
      }
614
615 ~_Hashtable() noexcept;
616
617 void
618 swap(_Hashtable&)
619 noexcept(__and_<__is_nothrow_swappable<_Hash>,
620 __is_nothrow_swappable<_Equal>>::value);
621
      // Basic container operations
      iterator
      begin() noexcept
      { return iterator(_M_begin()); }

      const_iterator
      begin() const noexcept
      { return const_iterator(_M_begin()); }

      // The past-the-end iterator is simply a null node pointer.
      iterator
      end() noexcept
      { return iterator(nullptr); }

      const_iterator
      end() const noexcept
      { return const_iterator(nullptr); }

      const_iterator
      cbegin() const noexcept
      { return const_iterator(_M_begin()); }

      const_iterator
      cend() const noexcept
      { return const_iterator(nullptr); }

      size_type
      size() const noexcept
      { return _M_element_count; }

      _GLIBCXX_NODISCARD bool
      empty() const noexcept
      { return size() == 0; }

      allocator_type
      get_allocator() const noexcept
      { return allocator_type(this->_M_node_allocator()); }

      size_type
      max_size() const noexcept
      { return __node_alloc_traits::max_size(this->_M_node_allocator()); }
662
      // Observers
      key_equal
      key_eq() const
      { return this->_M_eq(); }

      // hash_function, if present, comes from _Hash_code_base.

      // Bucket operations
      size_type
      bucket_count() const noexcept
      { return _M_bucket_count; }

      size_type
      max_bucket_count() const noexcept
      { return max_size(); }

      // Number of elements in bucket __bkt, counted by walking its nodes.
      size_type
      bucket_size(size_type __bkt) const
      { return std::distance(begin(__bkt), end(__bkt)); }

      // Index of the bucket that would hold key __k.
      size_type
      bucket(const key_type& __k) const
      { return _M_bucket_index(this->_M_hash_code(__k)); }
686
      // Local iterators: traverse the nodes of a single bucket.
      local_iterator
      begin(size_type __bkt)
      {
	return local_iterator(*this, _M_bucket_begin(__bkt),
			      __bkt, _M_bucket_count);
      }

      local_iterator
      end(size_type __bkt)
      { return local_iterator(*this, nullptr, __bkt, _M_bucket_count); }

      const_local_iterator
      begin(size_type __bkt) const
      {
	return const_local_iterator(*this, _M_bucket_begin(__bkt),
				    __bkt, _M_bucket_count);
      }

      const_local_iterator
      end(size_type __bkt) const
      { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }

      // DR 691.
      const_local_iterator
      cbegin(size_type __bkt) const
      {
	return const_local_iterator(*this, _M_bucket_begin(__bkt),
				    __bkt, _M_bucket_count);
      }

      const_local_iterator
      cend(size_type __bkt) const
      { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
720
721 float
722 load_factor() const noexcept
723 {
724 return static_cast<float>(size()) / static_cast<float>(bucket_count());
725 }
726
727 // max_load_factor, if present, comes from _Rehash_base.
728
      // Generalization of max_load_factor. Extension, not found in
      // TR1. Only useful if _RehashPolicy is something other than
      // the default.
      const _RehashPolicy&
      __rehash_policy() const
      { return _M_rehash_policy; }

      void
      __rehash_policy(const _RehashPolicy& __pol)
      { _M_rehash_policy = __pol; }
739
740 // Lookup.
741 iterator
742 find(const key_type& __k);
743
744 const_iterator
745 find(const key_type& __k) const;
746
747 size_type
748 count(const key_type& __k) const;
749
750 std::pair<iterator, iterator>
751 equal_range(const key_type& __k);
752
753 std::pair<const_iterator, const_iterator>
754 equal_range(const key_type& __k) const;
755
756 #if __cplusplus >= 202002L
757 #define __cpp_lib_generic_unordered_lookup 201811L
758
759 template<typename _Kt,
760 typename = __has_is_transparent_t<_Hash, _Kt>,
761 typename = __has_is_transparent_t<_Equal, _Kt>>
762 iterator
763 _M_find_tr(const _Kt& __k);
764
765 template<typename _Kt,
766 typename = __has_is_transparent_t<_Hash, _Kt>,
767 typename = __has_is_transparent_t<_Equal, _Kt>>
768 const_iterator
769 _M_find_tr(const _Kt& __k) const;
770
771 template<typename _Kt,
772 typename = __has_is_transparent_t<_Hash, _Kt>,
773 typename = __has_is_transparent_t<_Equal, _Kt>>
774 size_type
775 _M_count_tr(const _Kt& __k) const;
776
777 template<typename _Kt,
778 typename = __has_is_transparent_t<_Hash, _Kt>,
779 typename = __has_is_transparent_t<_Equal, _Kt>>
780 pair<iterator, iterator>
781 _M_equal_range_tr(const _Kt& __k);
782
783 template<typename _Kt,
784 typename = __has_is_transparent_t<_Hash, _Kt>,
785 typename = __has_is_transparent_t<_Equal, _Kt>>
786 pair<const_iterator, const_iterator>
787 _M_equal_range_tr(const _Kt& __k) const;
788 #endif // C++20
789
790 private:
      // Bucket index computation helpers, binding the current bucket count.
      size_type
      _M_bucket_index(const __node_value_type& __n) const noexcept
      { return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }

      size_type
      _M_bucket_index(__hash_code __c) const
      { return __hash_code_base::_M_bucket_index(__c, _M_bucket_count); }
799
800 __node_base_ptr
801 _M_find_before_node(const key_type&);
802
803 // Find and insert helper functions and types
804 // Find the node before the one matching the criteria.
805 __node_base_ptr
806 _M_find_before_node(size_type, const key_type&, __hash_code) const;
807
808 template<typename _Kt>
809 __node_base_ptr
810 _M_find_before_node_tr(size_type, const _Kt&, __hash_code) const;
811
812 __node_ptr
813 _M_find_node(size_type __bkt, const key_type& __key,
814 __hash_code __c) const
815 {
816 __node_base_ptr __before_n = _M_find_before_node(__bkt, __key, __c);
817 if (__before_n)
818 return static_cast<__node_ptr>(__before_n->_M_nxt);
819 return nullptr;
820 }
821
822 template<typename _Kt>
823 __node_ptr
824 _M_find_node_tr(size_type __bkt, const _Kt& __key,
825 __hash_code __c) const
826 {
827 auto __before_n = _M_find_before_node_tr(__bkt, __key, __c);
828 if (__before_n)
829 return static_cast<__node_ptr>(__before_n->_M_nxt);
830 return nullptr;
831 }
832
833 // Insert a node at the beginning of a bucket.
834 void
835 _M_insert_bucket_begin(size_type, __node_ptr);
836
837 // Remove the bucket first node
838 void
839 _M_remove_bucket_begin(size_type __bkt, __node_ptr __next_n,
840 size_type __next_bkt);
841
842 // Get the node before __n in the bucket __bkt
843 __node_base_ptr
844 _M_get_previous_node(size_type __bkt, __node_ptr __n);
845
846 pair<const_iterator, __hash_code>
847 _M_compute_hash_code(const_iterator __hint, const key_type& __k) const;
848
849 // Insert node __n with hash code __code, in bucket __bkt if no
850 // rehash (assumes no element with same key already present).
851 // Takes ownership of __n if insertion succeeds, throws otherwise.
852 iterator
853 _M_insert_unique_node(size_type __bkt, __hash_code,
854 __node_ptr __n, size_type __n_elt = 1);
855
856 // Insert node __n with key __k and hash code __code.
857 // Takes ownership of __n if insertion succeeds, throws otherwise.
858 iterator
859 _M_insert_multi_node(__node_ptr __hint,
860 __hash_code __code, __node_ptr __n);
861
      template<typename... _Args>
	std::pair<iterator, bool>
	_M_emplace(true_type __uks, _Args&&... __args);

      // With multi keys, an unhinted emplace is an emplace at cend().
      template<typename... _Args>
	iterator
	_M_emplace(false_type __uks, _Args&&... __args)
	{ return _M_emplace(cend(), __uks, std::forward<_Args>(__args)...); }

      // Emplace with hint, useless when keys are unique.
      template<typename... _Args>
	iterator
	_M_emplace(const_iterator, true_type __uks, _Args&&... __args)
	{ return _M_emplace(__uks, std::forward<_Args>(__args)...).first; }

      template<typename... _Args>
	iterator
	_M_emplace(const_iterator, false_type __uks, _Args&&... __args);
880
881 template<typename _Kt, typename _Arg, typename _NodeGenerator>
882 std::pair<iterator, bool>
883 _M_insert_unique(_Kt&&, _Arg&&, const _NodeGenerator&);
884
      // Forward a key for insertion.  When invoking _Hash on the caller's
      // key type _Kt could throw but invoking it on key_type cannot,
      // convert to key_type up front (so later hashing is non-throwing);
      // otherwise forward the argument unchanged.
      template<typename _Kt>
	static __conditional_t<
	  __and_<__is_nothrow_invocable<_Hash&, const key_type&>,
		 __not_<__is_nothrow_invocable<_Hash&, _Kt>>>::value,
	  key_type, _Kt&&>
	_S_forward_key(_Kt&& __k)
	{ return std::forward<_Kt>(__k); }

      // Exact key_type arguments pass through without conversion.
      static const key_type&
      _S_forward_key(const key_type& __k)
      { return __k; }

      static key_type&&
      _S_forward_key(key_type&& __k)
      { return std::move(__k); }
900
      // Insert with unique keys: extract (and possibly convert) the key,
      // then perform a unique insertion.
      template<typename _Arg, typename _NodeGenerator>
	std::pair<iterator, bool>
	_M_insert(_Arg&& __arg, const _NodeGenerator& __node_gen,
		  true_type /* __uks */)
	{
	  return _M_insert_unique(
	    _S_forward_key(_ExtractKey{}(std::forward<_Arg>(__arg))),
	    std::forward<_Arg>(__arg), __node_gen);
	}

      // Insert with multi keys: equivalent to a hinted insert at cend().
      template<typename _Arg, typename _NodeGenerator>
	iterator
	_M_insert(_Arg&& __arg, const _NodeGenerator& __node_gen,
		  false_type __uks)
	{
	  return _M_insert(cend(), std::forward<_Arg>(__arg), __node_gen,
			   __uks);
	}

      // Insert with hint, not used when keys are unique.
      template<typename _Arg, typename _NodeGenerator>
	iterator
	_M_insert(const_iterator, _Arg&& __arg,
		  const _NodeGenerator& __node_gen, true_type __uks)
	{
	  return
	    _M_insert(std::forward<_Arg>(__arg), __node_gen, __uks).first;
	}
929
      // Insert with hint when keys are not unique; defined out of line.
      template<typename _Arg, typename _NodeGenerator>
	iterator
	_M_insert(const_iterator, _Arg&&,
		  const _NodeGenerator&, false_type __uks);
935
      // Erase by key when keys are unique (removes at most one element);
      // returns the number of elements removed.
      size_type
      _M_erase(true_type __uks, const key_type&);

      // Erase by key when keys can be equivalent (removes the whole run);
      // returns the number of elements removed.
      size_type
      _M_erase(false_type __uks, const key_type&);

      // Unlink and destroy node __n of bucket __bkt, whose predecessor in
      // the element list is __prev_n; returns an iterator to the element
      // that followed __n.
      iterator
      _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n);
944
    public:
      // Emplace
      // Constructs an element from __args; the tag dispatch on
      // __unique_keys selects the unique/multi implementation.
      template<typename... _Args>
	__ireturn_type
	emplace(_Args&&... __args)
	{ return _M_emplace(__unique_keys{}, std::forward<_Args>(__args)...); }

      // Emplace with an insertion hint (only meaningful when keys are
      // not unique; ignored otherwise).
      template<typename... _Args>
	iterator
	emplace_hint(const_iterator __hint, _Args&&... __args)
	{
	  return _M_emplace(__hint, __unique_keys{},
			    std::forward<_Args>(__args)...);
	}
959
      // Insert member functions via inheritance.

      // Erase
      iterator
      erase(const_iterator);

      // LWG 2059.
      // Overload for mutable iterators; simply converts and delegates.
      iterator
      erase(iterator __it)
      { return erase(const_iterator(__it)); }

      // Erase all elements with key __k; dispatches on key uniqueness.
      size_type
      erase(const key_type& __k)
      { return _M_erase(__unique_keys{}, __k); }

      // Erase the range [first, last).
      iterator
      erase(const_iterator, const_iterator);

      // Destroy all elements.  The bucket array is kept (see the
      // destructor, which deallocates it separately).
      void
      clear() noexcept;

      // Set number of buckets keeping it appropriate for container's number
      // of elements.
      void rehash(size_type __bkt_count);

      // DR 1189.
      // reserve, if present, comes from _Rehash_base.
987
#if __cplusplus > 201404L
      /// Re-insert an extracted node into a container with unique keys.
      /// If an equivalent key is already present the handle is returned
      /// to the caller via insert_return_type::node; otherwise the
      /// container takes ownership of the node.
      insert_return_type
      _M_reinsert_node(node_type&& __nh)
      {
	insert_return_type __ret;
	if (__nh.empty())
	  __ret.position = end();
	else
	  {
	    __glibcxx_assert(get_allocator() == __nh.get_allocator());

	    const key_type& __k = __nh._M_key();
	    __hash_code __code = this->_M_hash_code(__k);
	    size_type __bkt = _M_bucket_index(__code);
	    if (__node_ptr __n = _M_find_node(__bkt, __k, __code))
	      {
		// Equivalent key found: hand the node back to the caller.
		__ret.node = std::move(__nh);
		__ret.position = iterator(__n);
		__ret.inserted = false;
	      }
	    else
	      {
		__ret.position
		  = _M_insert_unique_node(__bkt, __code, __nh._M_ptr);
		// The container now owns the node; release the handle.
		__nh.release();
		__ret.inserted = true;
	      }
	  }
	return __ret;
      }
1019
      /// Re-insert an extracted node into a container with equivalent keys.
      /// Always succeeds (duplicates are allowed); __hint guides the
      /// insertion position.
      iterator
      _M_reinsert_node_multi(const_iterator __hint, node_type&& __nh)
      {
	if (__nh.empty())
	  return end();

	__glibcxx_assert(get_allocator() == __nh.get_allocator());

	const key_type& __k = __nh._M_key();
	auto __code = this->_M_hash_code(__k);
	auto __ret
	  = _M_insert_multi_node(__hint._M_cur, __code, __nh._M_ptr);
	// The container now owns the node; release the handle.
	__nh.release();
	return __ret;
      }
1036
    private:
      // Unlink the node that follows __prev_n (an element of bucket
      // __bkt) from the singly-linked element list and return it wrapped
      // in a node handle that owns it.
      node_type
      _M_extract_node(size_t __bkt, __node_base_ptr __prev_n)
      {
	__node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
	if (__prev_n == _M_buckets[__bkt])
	  // __n is the first element of its bucket: let
	  // _M_remove_bucket_begin fix this bucket's (and possibly the
	  // next bucket's) before-begin pointer.
	  _M_remove_bucket_begin(__bkt, __n->_M_next(),
	     __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
	else if (__n->_M_nxt)
	  {
	    // If the node after __n starts a different bucket, that
	    // bucket's before-begin pointer must become __prev_n.
	    size_type __next_bkt = _M_bucket_index(*__n->_M_next());
	    if (__next_bkt != __bkt)
	      _M_buckets[__next_bkt] = __prev_n;
	  }

	__prev_n->_M_nxt = __n->_M_nxt;
	__n->_M_nxt = nullptr;
	--_M_element_count;
	return { __n, this->_M_node_allocator() };
      }
1057
      // Only use the possibly cached node's hash code if its hash function
      // _H2 matches _Hash and is stateless. Otherwise recompute it using _Hash.
      template<typename _H2>
	__hash_code
	_M_src_hash_code(const _H2&, const key_type& __k,
			 const __node_value_type& __src_n) const
	{
	  if constexpr (std::is_same_v<_H2, _Hash>)
	    if constexpr (std::is_empty_v<_Hash>)
	      // An empty hasher has no state, so the source's hasher must
	      // behave identically: reuse its (possibly cached) code.
	      return this->_M_hash_code(__src_n);

	  // Different or stateful hasher: recompute from the key.
	  return this->_M_hash_code(__k);
	}
1071
    public:
      // Extract a node.
      // Removes the element at __pos from the container without
      // destroying it and returns a handle that owns the node.
      node_type
      extract(const_iterator __pos)
      {
	size_t __bkt = _M_bucket_index(*__pos._M_cur);
	return _M_extract_node(__bkt,
			       _M_get_previous_node(__bkt, __pos._M_cur));
      }
1081
      /// Extract a node.
      /// Returns an empty node handle when no element with key __k exists.
      node_type
      extract(const _Key& __k)
      {
	node_type __nh;
	__hash_code __code = this->_M_hash_code(__k);
	std::size_t __bkt = _M_bucket_index(__code);
	if (__node_base_ptr __prev_node = _M_find_before_node(__bkt, __k, __code))
	  __nh = _M_extract_node(__bkt, __prev_node);
	return __nh;
      }
1093
      /// Merge from a compatible container into one with unique keys.
      /// Nodes whose key is not yet present are moved (no element copies);
      /// the rest stay in __src.
      template<typename _Compatible_Hashtable>
	void
	_M_merge_unique(_Compatible_Hashtable& __src)
	{
	  static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
			node_type>, "Node types are compatible");
	  __glibcxx_assert(get_allocator() == __src.get_allocator());

	  // __n_elt tracks an upper bound on the elements still to come,
	  // passed to _M_insert_unique_node (presumably to size a single
	  // rehash up front — see that function for the exact use).
	  auto __n_elt = __src.size();
	  for (auto __i = __src.cbegin(), __end = __src.cend(); __i != __end;)
	    {
	      // Advance __i before extract() invalidates __pos.
	      auto __pos = __i++;
	      const key_type& __k = _ExtractKey{}(*__pos);
	      __hash_code __code
		= _M_src_hash_code(__src.hash_function(), __k, *__pos._M_cur);
	      size_type __bkt = _M_bucket_index(__code);
	      if (_M_find_node(__bkt, __k, __code) == nullptr)
		{
		  auto __nh = __src.extract(__pos);
		  _M_insert_unique_node(__bkt, __code, __nh._M_ptr, __n_elt);
		  __nh.release();
		  __n_elt = 1;
		}
	      else if (__n_elt != 1)
		--__n_elt;
	    }
	}
1122
      /// Merge from a compatible container into one with equivalent keys.
      /// Every node of __src is moved into *this (no element copies).
      template<typename _Compatible_Hashtable>
	void
	_M_merge_multi(_Compatible_Hashtable& __src)
	{
	  static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
			node_type>, "Node types are compatible");
	  __glibcxx_assert(get_allocator() == __src.get_allocator());

	  __node_ptr __hint = nullptr;
	  // Reserve once up front so the loop does not rehash repeatedly.
	  this->reserve(size() + __src.size());
	  for (auto __i = __src.cbegin(), __end = __src.cend(); __i != __end;)
	    {
	      // Advance __i before extract() invalidates __pos.
	      auto __pos = __i++;
	      const key_type& __k = _ExtractKey{}(*__pos);
	      __hash_code __code
		= _M_src_hash_code(__src.hash_function(), __k, *__pos._M_cur);
	      auto __nh = __src.extract(__pos);
	      // Each inserted node serves as the hint for the next one.
	      __hint = _M_insert_multi_node(__hint, __code, __nh._M_ptr)._M_cur;
	      __nh.release();
	    }
	}
#endif // C++17
1146
    private:
      // The __uks tag selects the unique-key/multi-key implementation.

      // Helper rehash method used when keys are unique.
      void _M_rehash_aux(size_type __bkt_count, true_type __uks);

      // Helper rehash method used when keys can be non-unique.
      void _M_rehash_aux(size_type __bkt_count, false_type __uks);

      // Unconditionally change size of bucket array to n, restore
      // hash policy state to __state on exception.
      void _M_rehash(size_type __bkt_count, const __rehash_state& __state);
    };
1158
1159 // Definitions of class template _Hashtable's out-of-line member functions.
1160 template<typename _Key, typename _Value, typename _Alloc,
1161 typename _ExtractKey, typename _Equal,
1162 typename _Hash, typename _RangeHash, typename _Unused,
1163 typename _RehashPolicy, typename _Traits>
1164 auto
1165 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1166 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1167 _M_bucket_begin(size_type __bkt) const
1168 -> __node_ptr
1169 {
1170 __node_base_ptr __n = _M_buckets[__bkt];
1171 return __n ? static_cast<__node_ptr>(__n->_M_nxt) : nullptr;
1172 }
1173
  // Construct an empty table, then enlarge the bucket array if the
  // policy-rounded hint exceeds the default bucket count.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(size_type __bkt_count_hint,
	       const _Hash& __h, const _Equal& __eq, const allocator_type& __a)
    : _Hashtable(__h, __eq, __a)
    {
      // Let the rehash policy round the hint up to a usable bucket count.
      auto __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count_hint);
      if (__bkt_count > _M_bucket_count)
	{
	  _M_buckets = _M_allocate_buckets(__bkt_count);
	  _M_bucket_count = __bkt_count;
	}
    }
1191
  // Range constructor when keys are unique: size the bucket array from
  // the hint only, then insert each element of [__f, __l).
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _InputIterator>
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _Hashtable(_InputIterator __f, _InputIterator __l,
		 size_type __bkt_count_hint,
		 const _Hash& __h, const _Equal& __eq,
		 const allocator_type& __a, true_type /* __uks */)
      : _Hashtable(__bkt_count_hint, __h, __eq, __a)
      {
	for (; __f != __l; ++__f)
	  this->insert(*__f);
      }
1208
  // Range constructor when keys are not unique: every element of the
  // range will be inserted, so the bucket array can be sized up front
  // from the range length or the hint, whichever requires more buckets.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _InputIterator>
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _Hashtable(_InputIterator __f, _InputIterator __l,
		 size_type __bkt_count_hint,
		 const _Hash& __h, const _Equal& __eq,
		 const allocator_type& __a, false_type /* __uks */)
      : _Hashtable(__h, __eq, __a)
      {
	// NOTE(review): assumes __distance_fw yields the range length for
	// forward iterators (0 otherwise) — see its definition.
	auto __nb_elems = __detail::__distance_fw(__f, __l);
	auto __bkt_count =
	  _M_rehash_policy._M_next_bkt(
	    std::max(_M_rehash_policy._M_bkt_for_elements(__nb_elems),
		     __bkt_count_hint));

	if (__bkt_count > _M_bucket_count)
	  {
	    _M_buckets = _M_allocate_buckets(__bkt_count);
	    _M_bucket_count = __bkt_count;
	  }

	for (; __f != __l; ++__f)
	  this->insert(*__f);
      }
1237
  // Copy assignment.  When the allocator propagates on copy assignment
  // and differs from the source's, all storage must be released with the
  // old allocator before it is replaced; otherwise existing buckets and
  // nodes are reused via _M_assign_elements.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    operator=(const _Hashtable& __ht)
    -> _Hashtable&
    {
      if (&__ht == this)
	return *this;

      if (__node_alloc_traits::_S_propagate_on_copy_assign())
	{
	  auto& __this_alloc = this->_M_node_allocator();
	  auto& __that_alloc = __ht._M_node_allocator();
	  if (!__node_alloc_traits::_S_always_equal()
	      && __this_alloc != __that_alloc)
	    {
	      // Replacement allocator cannot free existing storage.
	      this->_M_deallocate_nodes(_M_begin());
	      _M_before_begin._M_nxt = nullptr;
	      _M_deallocate_buckets();
	      _M_buckets = nullptr;
	      // Only now is it safe to adopt the source's allocator.
	      std::__alloc_on_copy(__this_alloc, __that_alloc);
	      __hashtable_base::operator=(__ht);
	      _M_bucket_count = __ht._M_bucket_count;
	      _M_element_count = __ht._M_element_count;
	      _M_rehash_policy = __ht._M_rehash_policy;
	      __alloc_node_gen_t __alloc_node_gen(*this);
	      __try
		{
		  _M_assign(__ht, __alloc_node_gen);
		}
	      __catch(...)
		{
		  // _M_assign took care of deallocating all memory. Now we
		  // must make sure this instance remains in a usable state.
		  _M_reset();
		  __throw_exception_again;
		}
	      return *this;
	    }
	  std::__alloc_on_copy(__this_alloc, __that_alloc);
	}

      // Reuse allocated buckets and nodes.
      _M_assign_elements(__ht);
      return *this;
    }
1289
  // Replace this table's contents with __ht's elements (copied or moved,
  // depending on _Ht), reusing this table's existing nodes and bucket
  // array when possible.  On exception the previous bucket array is
  // restored and the table is left empty.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Ht>
      void
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_assign_elements(_Ht&& __ht)
      {
	__buckets_ptr __former_buckets = nullptr;
	std::size_t __former_bucket_count = _M_bucket_count;
	const __rehash_state& __former_state = _M_rehash_policy._M_state();

	if (_M_bucket_count != __ht._M_bucket_count)
	  {
	    // A differently sized bucket array is needed; keep the old
	    // one so it can be restored if the assignment throws.
	    __former_buckets = _M_buckets;
	    _M_buckets = _M_allocate_buckets(__ht._M_bucket_count);
	    _M_bucket_count = __ht._M_bucket_count;
	  }
	else
	  __builtin_memset(_M_buckets, 0,
			   _M_bucket_count * sizeof(__node_base_ptr));

	__try
	  {
	    __hashtable_base::operator=(std::forward<_Ht>(__ht));
	    _M_element_count = __ht._M_element_count;
	    _M_rehash_policy = __ht._M_rehash_policy;
	    // This node generator recycles the table's existing nodes
	    // before allocating new ones.
	    __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
	    _M_before_begin._M_nxt = nullptr;
	    _M_assign(std::forward<_Ht>(__ht), __roan);
	    if (__former_buckets)
	      _M_deallocate_buckets(__former_buckets, __former_bucket_count);
	  }
	__catch(...)
	  {
	    if (__former_buckets)
	      {
		// Restore previous buckets.
		_M_deallocate_buckets();
		_M_rehash_policy._M_reset(__former_state);
		_M_buckets = __former_buckets;
		_M_bucket_count = __former_bucket_count;
	      }
	    __builtin_memset(_M_buckets, 0,
			     _M_bucket_count * sizeof(__node_base_ptr));
	    __throw_exception_again;
	  }
      }
1340
  // Populate this table with all of __ht's elements, creating each node
  // through __node_gen (which may copy, move, or recycle).  Allocates the
  // bucket array if not already present; on exception everything created
  // by this call is released.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Ht, typename _NodeGenerator>
      void
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_assign(_Ht&& __ht, const _NodeGenerator& __node_gen)
      {
	__buckets_ptr __buckets = nullptr;
	if (!_M_buckets)
	  _M_buckets = __buckets = _M_allocate_buckets(_M_bucket_count);

	__try
	  {
	    if (!__ht._M_before_begin._M_nxt)
	      return;

	    // First deal with the special first node pointed to by
	    // _M_before_begin.
	    __node_ptr __ht_n = __ht._M_begin();
	    __node_ptr __this_n
	      = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
	    this->_M_copy_code(*__this_n, *__ht_n);
	    _M_update_bbegin(__this_n);

	    // Then deal with other nodes.
	    __node_ptr __prev_n = __this_n;
	    for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
	      {
		__this_n = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
		__prev_n->_M_nxt = __this_n;
		this->_M_copy_code(*__this_n, *__ht_n);
		// A bucket entry points to the node *before* the bucket's
		// first element, hence __prev_n.
		size_type __bkt = _M_bucket_index(*__this_n);
		if (!_M_buckets[__bkt])
		  _M_buckets[__bkt] = __prev_n;
		__prev_n = __this_n;
	      }
	  }
	__catch(...)
	  {
	    clear();
	    if (__buckets)
	      _M_deallocate_buckets();
	    __throw_exception_again;
	  }
      }
1389
  // Restore the freshly-constructed empty state, pointing at the
  // in-object single bucket.  Nothing is deallocated here; callers use
  // this after the storage has been stolen or already released.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_reset() noexcept
    {
      _M_rehash_policy._M_reset();
      _M_bucket_count = 1;
      _M_single_bucket = nullptr;
      _M_buckets = &_M_single_bucket;
      _M_before_begin._M_nxt = nullptr;
      _M_element_count = 0;
    }
1406
  // Move assignment when the allocator propagates (or allocators are
  // equal): release the current contents and steal __ht's storage.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_move_assign(_Hashtable&& __ht, true_type)
    {
      if (__builtin_expect(std::__addressof(__ht) == this, false))
	return;

      this->_M_deallocate_nodes(_M_begin());
      _M_deallocate_buckets();
      __hashtable_base::operator=(std::move(__ht));
      _M_rehash_policy = __ht._M_rehash_policy;
      // The in-object single bucket cannot be stolen; copy its content.
      if (!__ht._M_uses_single_bucket())
	_M_buckets = __ht._M_buckets;
      else
	{
	  _M_buckets = &_M_single_bucket;
	  _M_single_bucket = __ht._M_single_bucket;
	}

      _M_bucket_count = __ht._M_bucket_count;
      _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
      _M_element_count = __ht._M_element_count;
      std::__alloc_on_move(this->_M_node_allocator(), __ht._M_node_allocator());

      // Fix bucket containing the _M_before_begin pointer that can't be moved.
      _M_update_bbegin();
      __ht._M_reset();
    }
1440
1441 template<typename _Key, typename _Value, typename _Alloc,
1442 typename _ExtractKey, typename _Equal,
1443 typename _Hash, typename _RangeHash, typename _Unused,
1444 typename _RehashPolicy, typename _Traits>
1445 void
1446 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1447 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1448 _M_move_assign(_Hashtable&& __ht, false_type)
1449 {
1450 if (__ht._M_node_allocator() == this->_M_node_allocator())
1451 _M_move_assign(std::move(__ht), true_type{});
1452 else
1453 {
1454 // Can't move memory, move elements then.
1455 _M_assign_elements(std::move(__ht));
1456 __ht.clear();
1457 }
1458 }
1459
  // Copy constructor: the node allocator is obtained through
  // select_on_container_copy_construction, then the elements are deep
  // copied by _M_assign.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(const _Hashtable& __ht)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(
	__node_alloc_traits::_S_select_on_copy(__ht._M_node_allocator())),
      __enable_default_ctor(__ht),
      _M_buckets(nullptr),
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      __alloc_node_gen_t __alloc_node_gen(*this);
      _M_assign(__ht, __alloc_node_gen);
    }
1481
  // Move constructor when the allocator is always equal: steal __ht's
  // storage wholesale.  noexcept when moving the functors can't throw
  // (_S_nothrow_move).
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
	       true_type /* alloc always equal */)
    noexcept(_S_nothrow_move())
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(std::move(__a)),
      __enable_default_ctor(__ht),
      _M_buckets(__ht._M_buckets),
      _M_bucket_count(__ht._M_bucket_count),
      _M_before_begin(__ht._M_before_begin._M_nxt),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      // Update buckets if __ht is using its single bucket.
      if (__ht._M_uses_single_bucket())
	{
	  _M_buckets = &_M_single_bucket;
	  _M_single_bucket = __ht._M_single_bucket;
	}

      // Fix bucket containing the _M_before_begin pointer that can't be moved.
      _M_update_bbegin();

      // Leave the source in a valid empty state.
      __ht._M_reset();
    }
1514
  // Copy construction with an explicitly supplied allocator; elements
  // are deep copied by _M_assign.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(const _Hashtable& __ht, const allocator_type& __a)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(__node_alloc_type(__a)),
      __enable_default_ctor(__ht),
      _M_buckets(),
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      __alloc_node_gen_t __alloc_node_gen(*this);
      _M_assign(__ht, __alloc_node_gen);
    }
1535
  // Move construction when allocators may differ.  If they compare equal
  // the storage is stolen; otherwise every element is moved (or copied,
  // when moving could throw) into nodes from the new allocator.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
	       false_type /* alloc always equal */)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(std::move(__a)),
      __enable_default_ctor(__ht),
      _M_buckets(nullptr),
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      if (__ht._M_node_allocator() == this->_M_node_allocator())
	{
	  // Equal allocators: the storage can be stolen directly.
	  if (__ht._M_uses_single_bucket())
	    {
	      _M_buckets = &_M_single_bucket;
	      _M_single_bucket = __ht._M_single_bucket;
	    }
	  else
	    _M_buckets = __ht._M_buckets;

	  // Fix bucket containing the _M_before_begin pointer that can't be
	  // moved.
	  _M_update_bbegin(__ht._M_begin());

	  __ht._M_reset();
	}
      else
	{
	  __alloc_node_gen_t __alloc_gen(*this);

	  // Copy instead of move when value_type's move could throw,
	  // to keep the strong guarantee.
	  using _Fwd_Ht = __conditional_t<
	    __move_if_noexcept_cond<value_type>::value,
	    const _Hashtable&, _Hashtable&&>;
	  _M_assign(std::forward<_Fwd_Ht>(__ht), __alloc_gen);
	  __ht.clear();
	}
    }
1581
  // Destructor: destroys all elements, then releases the bucket array.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    ~_Hashtable() noexcept
    {
      // Getting a bucket index from a node shall not throw because it is used
      // in methods (erase, swap...) that shall not throw. Need a complete
      // type to check this, so do it in the destructor not at class scope.
      static_assert(noexcept(declval<const __hash_code_base_access&>()
			     ._M_bucket_index(declval<const __node_value_type&>(),
					      (std::size_t)0)),
		    "Cache the hash code or qualify your functors involved"
		    " in hash code and bucket index computation with noexcept");

      clear();
      _M_deallocate_buckets();
    }
1602
  // Swap all state with __x.  Three cases are needed for the bucket
  // pointers because the in-object single bucket cannot be exchanged
  // directly.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    swap(_Hashtable& __x)
    noexcept(__and_<__is_nothrow_swappable<_Hash>,
		    __is_nothrow_swappable<_Equal>>::value)
    {
      // The only base class with member variables is hash_code_base.
      // We define _Hash_code_base::_M_swap because different
      // specializations have different members.
      this->_M_swap(__x);

      std::__alloc_on_swap(this->_M_node_allocator(), __x._M_node_allocator());
      std::swap(_M_rehash_policy, __x._M_rehash_policy);

      // Deal properly with potentially moved instances.
      if (this->_M_uses_single_bucket())
	{
	  if (!__x._M_uses_single_bucket())
	    {
	      // Only this side uses its single bucket: exchange by hand.
	      _M_buckets = __x._M_buckets;
	      __x._M_buckets = &__x._M_single_bucket;
	    }
	}
      else if (__x._M_uses_single_bucket())
	{
	  // Only __x uses its single bucket: exchange by hand.
	  __x._M_buckets = _M_buckets;
	  _M_buckets = &_M_single_bucket;
	}
      else
	std::swap(_M_buckets, __x._M_buckets);

      std::swap(_M_bucket_count, __x._M_bucket_count);
      std::swap(_M_before_begin._M_nxt, __x._M_before_begin._M_nxt);
      std::swap(_M_element_count, __x._M_element_count);
      std::swap(_M_single_bucket, __x._M_single_bucket);

      // Fix buckets containing the _M_before_begin pointers that can't be
      // swapped.
      _M_update_bbegin();
      __x._M_update_bbegin();
    }
1649
1650 template<typename _Key, typename _Value, typename _Alloc,
1651 typename _ExtractKey, typename _Equal,
1652 typename _Hash, typename _RangeHash, typename _Unused,
1653 typename _RehashPolicy, typename _Traits>
1654 auto
1655 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1656 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1657 find(const key_type& __k)
1658 -> iterator
1659 {
1660 if (size() <= __small_size_threshold())
1661 {
1662 for (auto __it = begin(); __it != end(); ++__it)
1663 if (this->_M_key_equals(__k, *__it._M_cur))
1664 return __it;
1665 return end();
1666 }
1667
1668 __hash_code __code = this->_M_hash_code(__k);
1669 std::size_t __bkt = _M_bucket_index(__code);
1670 return iterator(_M_find_node(__bkt, __k, __code));
1671 }
1672
1673 template<typename _Key, typename _Value, typename _Alloc,
1674 typename _ExtractKey, typename _Equal,
1675 typename _Hash, typename _RangeHash, typename _Unused,
1676 typename _RehashPolicy, typename _Traits>
1677 auto
1678 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1679 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1680 find(const key_type& __k) const
1681 -> const_iterator
1682 {
1683 if (size() <= __small_size_threshold())
1684 {
1685 for (auto __it = begin(); __it != end(); ++__it)
1686 if (this->_M_key_equals(__k, *__it._M_cur))
1687 return __it;
1688 return end();
1689 }
1690
1691 __hash_code __code = this->_M_hash_code(__k);
1692 std::size_t __bkt = _M_bucket_index(__code);
1693 return const_iterator(_M_find_node(__bkt, __k, __code));
1694 }
1695
1696 #if __cplusplus > 201703L
1697 template<typename _Key, typename _Value, typename _Alloc,
1698 typename _ExtractKey, typename _Equal,
1699 typename _Hash, typename _RangeHash, typename _Unused,
1700 typename _RehashPolicy, typename _Traits>
1701 template<typename _Kt, typename, typename>
1702 auto
1703 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1704 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1705 _M_find_tr(const _Kt& __k)
1706 -> iterator
1707 {
1708 __hash_code __code = this->_M_hash_code_tr(__k);
1709 std::size_t __bkt = _M_bucket_index(__code);
1710 return iterator(_M_find_node_tr(__bkt, __k, __code));
1711 }
1712
1713 template<typename _Key, typename _Value, typename _Alloc,
1714 typename _ExtractKey, typename _Equal,
1715 typename _Hash, typename _RangeHash, typename _Unused,
1716 typename _RehashPolicy, typename _Traits>
1717 template<typename _Kt, typename, typename>
1718 auto
1719 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1720 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1721 _M_find_tr(const _Kt& __k) const
1722 -> const_iterator
1723 {
1724 __hash_code __code = this->_M_hash_code_tr(__k);
1725 std::size_t __bkt = _M_bucket_index(__code);
1726 return const_iterator(_M_find_node_tr(__bkt, __k, __code));
1727 }
1728 #endif
1729
1730 template<typename _Key, typename _Value, typename _Alloc,
1731 typename _ExtractKey, typename _Equal,
1732 typename _Hash, typename _RangeHash, typename _Unused,
1733 typename _RehashPolicy, typename _Traits>
1734 auto
1735 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1736 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1737 count(const key_type& __k) const
1738 -> size_type
1739 {
1740 auto __it = find(__k);
1741 if (!__it._M_cur)
1742 return 0;
1743
1744 if (__unique_keys::value)
1745 return 1;
1746
1747 // All equivalent values are next to each other, if we find a
1748 // non-equivalent value after an equivalent one it means that we won't
1749 // find any new equivalent value.
1750 size_type __result = 1;
1751 for (auto __ref = __it++;
1752 __it._M_cur && this->_M_node_equals(*__ref._M_cur, *__it._M_cur);
1753 ++__it)
1754 ++__result;
1755
1756 return __result;
1757 }
1758
#if __cplusplus > 201703L
  // Heterogeneous count: same run-counting as count(), but comparing
  // against __k directly with the transparent equality.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_count_tr(const _Kt& __k) const
      -> size_type
      {
	const __hash_code __code = this->_M_hash_code_tr(__k);
	const std::size_t __bkt = _M_bucket_index(__code);
	auto __n = _M_find_node_tr(__bkt, __k, __code);
	if (!__n)
	  return 0;

	// Equivalent elements are stored adjacently: count the run that
	// follows the first match.
	iterator __it(__n);
	++__it;
	size_type __result = 1;
	while (__it._M_cur && this->_M_equals_tr(__k, __code, *__it._M_cur))
	  {
	    ++__result;
	    ++__it;
	  }

	return __result;
      }
#endif
1790
1791 template<typename _Key, typename _Value, typename _Alloc,
1792 typename _ExtractKey, typename _Equal,
1793 typename _Hash, typename _RangeHash, typename _Unused,
1794 typename _RehashPolicy, typename _Traits>
1795 auto
1796 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1797 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1798 equal_range(const key_type& __k)
1799 -> pair<iterator, iterator>
1800 {
1801 auto __ite = find(__k);
1802 if (!__ite._M_cur)
1803 return { __ite, __ite };
1804
1805 auto __beg = __ite++;
1806 if (__unique_keys::value)
1807 return { __beg, __ite };
1808
1809 // All equivalent values are next to each other, if we find a
1810 // non-equivalent value after an equivalent one it means that we won't
1811 // find any new equivalent value.
1812 while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
1813 ++__ite;
1814
1815 return { __beg, __ite };
1816 }
1817
1818 template<typename _Key, typename _Value, typename _Alloc,
1819 typename _ExtractKey, typename _Equal,
1820 typename _Hash, typename _RangeHash, typename _Unused,
1821 typename _RehashPolicy, typename _Traits>
1822 auto
1823 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1824 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1825 equal_range(const key_type& __k) const
1826 -> pair<const_iterator, const_iterator>
1827 {
1828 auto __ite = find(__k);
1829 if (!__ite._M_cur)
1830 return { __ite, __ite };
1831
1832 auto __beg = __ite++;
1833 if (__unique_keys::value)
1834 return { __beg, __ite };
1835
1836 // All equivalent values are next to each other, if we find a
1837 // non-equivalent value after an equivalent one it means that we won't
1838 // find any new equivalent value.
1839 while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
1840 ++__ite;
1841
1842 return { __beg, __ite };
1843 }
1844
1845 #if __cplusplus > 201703L
1846 template<typename _Key, typename _Value, typename _Alloc,
1847 typename _ExtractKey, typename _Equal,
1848 typename _Hash, typename _RangeHash, typename _Unused,
1849 typename _RehashPolicy, typename _Traits>
1850 template<typename _Kt, typename, typename>
1851 auto
1852 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1853 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1854 _M_equal_range_tr(const _Kt& __k)
1855 -> pair<iterator, iterator>
1856 {
1857 __hash_code __code = this->_M_hash_code_tr(__k);
1858 std::size_t __bkt = _M_bucket_index(__code);
1859 auto __n = _M_find_node_tr(__bkt, __k, __code);
1860 iterator __ite(__n);
1861 if (!__n)
1862 return { __ite, __ite };
1863
1864 // All equivalent values are next to each other, if we find a
1865 // non-equivalent value after an equivalent one it means that we won't
1866 // find any new equivalent value.
1867 auto __beg = __ite++;
1868 while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
1869 ++__ite;
1870
1871 return { __beg, __ite };
1872 }
1873
  // Const overload of the heterogeneous-lookup equal_range above; same
  // algorithm, returning const_iterators.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_equal_range_tr(const _Kt& __k) const
      -> pair<const_iterator, const_iterator>
      {
	// Hash once, then locate the bucket and the first equivalent node.
	__hash_code __code = this->_M_hash_code_tr(__k);
	std::size_t __bkt = _M_bucket_index(__code);
	auto __n = _M_find_node_tr(__bkt, __k, __code);
	const_iterator __ite(__n);
	if (!__n)
	  // No match: return an empty range.
	  return { __ite, __ite };

	// All equivalent values are next to each other, if we find a
	// non-equivalent value after an equivalent one it means that we won't
	// find any new equivalent value.
	auto __beg = __ite++;
	while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
	  ++__ite;

	return { __beg, __ite };
      }
1901 #endif
1902
  // Find the node before the one whose key compares equal to k.
  // Return nullptr if no node is found.
  //
  // This overload scans the whole element list from _M_before_begin and
  // compares keys directly, without computing a hash code; callers in this
  // file use it only when size() <= __small_size_threshold().
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_find_before_node(const key_type& __k)
    -> __node_base_ptr
    {
      // Start from the sentinel before-begin node so the returned pointer
      // always has a _M_nxt that can be relinked by the caller.
      __node_base_ptr __prev_p = &_M_before_begin;
      if (!__prev_p->_M_nxt)
	// Container is empty.
	return nullptr;

      for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);
	   __p != nullptr;
	   __p = __p->_M_next())
	{
	  if (this->_M_key_equals(__k, *__p))
	    return __prev_p;

	  __prev_p = __p;
	}

      return nullptr;
    }
1931
  // Find the node before the one whose key compares equal to k in the bucket
  // bkt. Return nullptr if no node is found.
  //
  // _M_buckets[__bkt] points at the node *before* the bucket's first
  // element (possibly &_M_before_begin), so the scan below starts at
  // __prev_p->_M_nxt and stops as soon as it would leave bucket __bkt.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_find_before_node(size_type __bkt, const key_type& __k,
			__hash_code __code) const
    -> __node_base_ptr
    {
      __node_base_ptr __prev_p = _M_buckets[__bkt];
      if (!__prev_p)
	// Empty bucket.
	return nullptr;

      // A non-null bucket pointer guarantees at least one element, so the
      // loop condition lives in the break below rather than in the for.
      for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
	   __p = __p->_M_next())
	{
	  if (this->_M_equals(__k, __code, *__p))
	    return __prev_p;

	  // Stop at the end of the list or when the next node belongs to
	  // another bucket.
	  if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
	    break;
	  __prev_p = __p;
	}

      return nullptr;
    }
1962
  // Heterogeneous-lookup variant of _M_find_before_node: same in-bucket
  // scan, but compares via the transparent equality (_M_equals_tr) so _Kt
  // need not be key_type.  Returns the node before the first match in
  // bucket __bkt, or nullptr.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_find_before_node_tr(size_type __bkt, const _Kt& __k,
			     __hash_code __code) const
      -> __node_base_ptr
      {
	__node_base_ptr __prev_p = _M_buckets[__bkt];
	if (!__prev_p)
	  // Empty bucket.
	  return nullptr;

	for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
	     __p = __p->_M_next())
	  {
	    if (this->_M_equals_tr(__k, __code, *__p))
	      return __prev_p;

	    // Stop at the end of the list or when leaving bucket __bkt.
	    if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
	      break;
	    __prev_p = __p;
	  }

	return nullptr;
      }
1992
  // Link __node at the front of bucket __bkt, maintaining the invariant
  // that _M_buckets[__bkt] points at the node *before* the bucket's first
  // element.  __node's hash code must already be stored and __bkt must be
  // its bucket index.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_insert_bucket_begin(size_type __bkt, __node_ptr __node)
    {
      if (_M_buckets[__bkt])
	{
	  // Bucket is not empty, we just need to insert the new node
	  // after the bucket before begin.
	  __node->_M_nxt = _M_buckets[__bkt]->_M_nxt;
	  _M_buckets[__bkt]->_M_nxt = __node;
	}
      else
	{
	  // The bucket is empty, the new node is inserted at the
	  // beginning of the singly-linked list and the bucket will
	  // contain _M_before_begin pointer.
	  __node->_M_nxt = _M_before_begin._M_nxt;
	  _M_before_begin._M_nxt = __node;

	  if (__node->_M_nxt)
	    // We must update former begin bucket that is pointing to
	    // _M_before_begin.
	    _M_buckets[_M_bucket_index(*__node->_M_next())] = __node;

	  _M_buckets[__bkt] = &_M_before_begin;
	}
    }
2025
  // Fix up bucket pointers after the beginning of bucket __bkt has been
  // unlinked.  __next is the node that now follows the erased range
  // (possibly null) and __next_bkt its bucket index (meaningful only when
  // __next is non-null).
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_remove_bucket_begin(size_type __bkt, __node_ptr __next,
			   size_type __next_bkt)
    {
      // If __next is still in __bkt the bucket is not empty and nothing
      // needs updating.
      if (!__next || __next_bkt != __bkt)
	{
	  // Bucket is now empty
	  // First update next bucket if any
	  if (__next)
	    _M_buckets[__next_bkt] = _M_buckets[__bkt];

	  // Second update before begin node if necessary
	  if (&_M_before_begin == _M_buckets[__bkt])
	    _M_before_begin._M_nxt = __next;
	  _M_buckets[__bkt] = nullptr;
	}
    }
2049
2050 template<typename _Key, typename _Value, typename _Alloc,
2051 typename _ExtractKey, typename _Equal,
2052 typename _Hash, typename _RangeHash, typename _Unused,
2053 typename _RehashPolicy, typename _Traits>
2054 auto
2055 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2056 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2057 _M_get_previous_node(size_type __bkt, __node_ptr __n)
2058 -> __node_base_ptr
2059 {
2060 __node_base_ptr __prev_n = _M_buckets[__bkt];
2061 while (__prev_n->_M_nxt != __n)
2062 __prev_n = __prev_n->_M_nxt;
2063 return __prev_n;
2064 }
2065
  // Emplace for unique-key containers: build the node first (the key is
  // only available once the value is constructed), then insert it if no
  // equivalent key already exists.  Returns {iterator, inserted?}.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename... _Args>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_emplace(true_type /* __uks */, _Args&&... __args)
      -> pair<iterator, bool>
      {
	// First build the node to get access to the hash code
	// (_Scoped_node deallocates it unless _M_node is cleared below).
	_Scoped_node __node { this, std::forward<_Args>(__args)... };
	const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
	// For small containers a linear scan comparing keys directly is
	// cheaper than hashing.
	if (size() <= __small_size_threshold())
	  {
	    for (auto __it = begin(); __it != end(); ++__it)
	      if (this->_M_key_equals(__k, *__it._M_cur))
		// There is already an equivalent node, no insertion
		return { __it, false };
	  }

	__hash_code __code = this->_M_hash_code(__k);
	size_type __bkt = _M_bucket_index(__code);
	// Above the threshold, search only the key's bucket.
	if (size() > __small_size_threshold())
	  if (__node_ptr __p = _M_find_node(__bkt, __k, __code))
	    // There is already an equivalent node, no insertion
	    return { iterator(__p), false };

	// Insert the node
	auto __pos = _M_insert_unique_node(__bkt, __code, __node._M_node);
	// Ownership now belongs to the container; disarm the guard.
	__node._M_node = nullptr;
	return { __pos, true };
      }
2100
  // Emplace with hint for equivalent-key containers: always inserts.
  // The hint is forwarded to _M_compute_hash_code, which may use it to
  // avoid hashing, and to _M_insert_multi_node as an insertion position
  // candidate.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename... _Args>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_emplace(const_iterator __hint, false_type /* __uks */,
		 _Args&&... __args)
      -> iterator
      {
	// First build the node to get its hash code.
	// (_Scoped_node deallocates it if anything below throws.)
	_Scoped_node __node { this, std::forward<_Args>(__args)... };
	const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());

	auto __res = this->_M_compute_hash_code(__hint, __k);
	auto __pos
	  = _M_insert_multi_node(__res.first._M_cur, __res.second,
				 __node._M_node);
	// Ownership now belongs to the container; disarm the guard.
	__node._M_node = nullptr;
	return __pos;
      }
2124
  // Compute the hash code for __k, preferring to recover it from an
  // already-stored equivalent element when the container is small:
  // for small sizes a key-equality scan (starting at __hint, then
  // wrapping from the beginning) may find an equal element and reuse its
  // code instead of hashing __k.  Returns {position of the equivalent
  // element or __hint, hash code}.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_compute_hash_code(const_iterator __hint, const key_type& __k) const
    -> pair<const_iterator, __hash_code>
    {
      if (size() <= __small_size_threshold())
	{
	  if (__hint != cend())
	    {
	      // Scan from the hint to the end first — the hint is the
	      // most likely position of an equivalent element.
	      for (auto __it = __hint; __it != cend(); ++__it)
		if (this->_M_key_equals(__k, *__it._M_cur))
		  return { __it, this->_M_hash_code(*__it._M_cur) };
	    }

	  // Then scan the part before the hint.
	  for (auto __it = cbegin(); __it != __hint; ++__it)
	    if (this->_M_key_equals(__k, *__it._M_cur))
	      return { __it, this->_M_hash_code(*__it._M_cur) };
	}

      // No equivalent element found (or container too big): hash the key.
      return { __hint, this->_M_hash_code(__k) };
    }
2151
  // Insert __node, known to carry a key not already present, into bucket
  // __bkt.  Rehashes first if the policy requires it (for __n_elt incoming
  // elements), stores __code in the node and links it at the bucket front.
  // Returns an iterator to the inserted node.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_insert_unique_node(size_type __bkt, __hash_code __code,
			  __node_ptr __node, size_type __n_elt)
    -> iterator
    {
      // Saved so _M_rehash can restore the policy state if it throws.
      const __rehash_state& __saved_state = _M_rehash_policy._M_state();
      std::pair<bool, std::size_t> __do_rehash
	= _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count,
					  __n_elt);

      if (__do_rehash.first)
	{
	  _M_rehash(__do_rehash.second, __saved_state);
	  // The bucket count changed, recompute the node's bucket.
	  __bkt = _M_bucket_index(__code);
	}

      this->_M_store_code(*__node, __code);

      // Always insert at the beginning of the bucket.
      _M_insert_bucket_begin(__bkt, __node);
      ++_M_element_count;
      return iterator(__node);
    }
2181
  // Insert __node unconditionally (equivalent keys allowed), placing it
  // next to any existing equivalent elements to keep them contiguous.
  // __hint, if non-null and equivalent, is used as the insertion point.
  // Returns an iterator to the inserted node.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_insert_multi_node(__node_ptr __hint,
			 __hash_code __code, __node_ptr __node)
    -> iterator
    {
      // Saved so _M_rehash can restore the policy state if it throws.
      const __rehash_state& __saved_state = _M_rehash_policy._M_state();
      std::pair<bool, std::size_t> __do_rehash
	= _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1);

      if (__do_rehash.first)
	_M_rehash(__do_rehash.second, __saved_state);

      this->_M_store_code(*__node, __code);
      const key_type& __k = _ExtractKey{}(__node->_M_v());
      size_type __bkt = _M_bucket_index(__code);

      // Find the node before an equivalent one or use hint if it exists and
      // if it is equivalent.
      __node_base_ptr __prev
	= __builtin_expect(__hint != nullptr, false)
	  && this->_M_equals(__k, __code, *__hint)
	? __hint
	: _M_find_before_node(__bkt, __k, __code);

      if (__prev)
	{
	  // Insert after the node before the equivalent one.
	  __node->_M_nxt = __prev->_M_nxt;
	  __prev->_M_nxt = __node;
	  if (__builtin_expect(__prev == __hint, false))
	    // hint might be the last bucket node, in this case we need to
	    // update next bucket.
	    if (__node->_M_nxt
		&& !this->_M_equals(__k, __code, *__node->_M_next()))
	      {
		size_type __next_bkt = _M_bucket_index(*__node->_M_next());
		if (__next_bkt != __bkt)
		  _M_buckets[__next_bkt] = __node;
	      }
	}
      else
	// The inserted node has no equivalent in the hashtable. We must
	// insert the new node at the beginning of the bucket to preserve
	// equivalent elements' relative positions.
	_M_insert_bucket_begin(__bkt, __node);
      ++_M_element_count;
      return iterator(__node);
    }
2236
  // Insert v if no element with its key is already present.
  //
  // Unlike _M_emplace, the key __k is available before any node is built,
  // so the duplicate check happens first and no node is allocated when the
  // key already exists.  __node_gen builds the node from __k and __v.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename _Arg, typename _NodeGenerator>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_insert_unique(_Kt&& __k, _Arg&& __v,
		       const _NodeGenerator& __node_gen)
      -> pair<iterator, bool>
      {
	// For small containers a linear key-equality scan avoids hashing.
	if (size() <= __small_size_threshold())
	  for (auto __it = begin(); __it != end(); ++__it)
	    if (this->_M_key_equals_tr(__k, *__it._M_cur))
	      return { __it, false };

	__hash_code __code = this->_M_hash_code_tr(__k);
	size_type __bkt = _M_bucket_index(__code);

	// Above the threshold, search only the key's bucket.
	if (size() > __small_size_threshold())
	  if (__node_ptr __node = _M_find_node_tr(__bkt, __k, __code))
	    return { iterator(__node), false };

	// Key not present: build the node (guarded against leaks by
	// _Scoped_node) and insert it.
	_Scoped_node __node {
	  __node_builder_t::_S_build(std::forward<_Kt>(__k),
				     std::forward<_Arg>(__v),
				     __node_gen),
	  this
	};
	auto __pos
	  = _M_insert_unique_node(__bkt, __code, __node._M_node);
	// Ownership now belongs to the container; disarm the guard.
	__node._M_node = nullptr;
	return { __pos, true };
      }
2273
  // Insert v unconditionally.
  //
  // Equivalent-key insert with hint: allocate the node, compute (or
  // recover, via the hint) the hash code, then link the node next to any
  // equivalent elements.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Arg, typename _NodeGenerator>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_insert(const_iterator __hint, _Arg&& __v,
		const _NodeGenerator& __node_gen,
		false_type /* __uks */)
      -> iterator
      {
	// First allocate new node so that we don't do anything if it throws.
	_Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };

	// Second compute the hash code so that we don't rehash if it throws.
	auto __res = this->_M_compute_hash_code(
	  __hint, _ExtractKey{}(__node._M_node->_M_v()));

	auto __pos
	  = _M_insert_multi_node(__res.first._M_cur, __res.second,
				 __node._M_node);
	// Ownership now belongs to the container; disarm the guard.
	__node._M_node = nullptr;
	return __pos;
      }
2301
  // Erase the element at __it.  Returns an iterator to the following
  // element.  __it must be dereferenceable.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    erase(const_iterator __it)
    -> iterator
    {
      __node_ptr __n = __it._M_cur;
      std::size_t __bkt = _M_bucket_index(*__n);

      // Look for previous node to unlink it from the erased one, this
      // is why we need buckets to contain the before begin to make
      // this search fast.
      __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
      return _M_erase(__bkt, __prev_n, __n);
    }
2321
  // Unlink and deallocate node __n from bucket __bkt; __prev_n is the node
  // immediately before __n.  Fixes the bucket pointers that referenced __n
  // and returns an iterator to the node after it.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
    -> iterator
    {
      if (__prev_n == _M_buckets[__bkt])
	// __n is the bucket's first element; the bucket may become empty.
	_M_remove_bucket_begin(__bkt, __n->_M_next(),
	   __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
      else if (__n->_M_nxt)
	{
	  // If __n was the last node of its bucket, the next bucket's
	  // before-begin pointer must now reference __prev_n.
	  size_type __next_bkt = _M_bucket_index(*__n->_M_next());
	  if (__next_bkt != __bkt)
	    _M_buckets[__next_bkt] = __prev_n;
	}

      __prev_n->_M_nxt = __n->_M_nxt;
      iterator __result(__n->_M_next());
      this->_M_deallocate_node(__n);
      --_M_element_count;

      return __result;
    }
2349
  // Erase by key for unique-key containers: at most one element matches.
  // Returns the number of erased elements (0 or 1).
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_erase(true_type /* __uks */, const key_type& __k)
    -> size_type
    {
      __node_base_ptr __prev_n;
      __node_ptr __n;
      std::size_t __bkt;
      if (size() <= __small_size_threshold())
	{
	  // Small container: linear scan without hashing.
	  __prev_n = _M_find_before_node(__k);
	  if (!__prev_n)
	    return 0;

	  // We found a matching node, erase it.
	  __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
	  __bkt = _M_bucket_index(*__n);
	}
      else
	{
	  __hash_code __code = this->_M_hash_code(__k);
	  __bkt = _M_bucket_index(__code);

	  // Look for the node before the first matching node.
	  __prev_n = _M_find_before_node(__bkt, __k, __code);
	  if (!__prev_n)
	    return 0;

	  // We found a matching node, erase it.
	  __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
	}

      _M_erase(__bkt, __prev_n, __n);
      return 1;
    }
2390
  // Erase by key for equivalent-key containers: removes the whole run of
  // contiguous elements equal to __k.  Returns the number erased.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_erase(false_type /* __uks */, const key_type& __k)
    -> size_type
    {
      std::size_t __bkt;
      __node_base_ptr __prev_n;
      __node_ptr __n;
      if (size() <= __small_size_threshold())
	{
	  // Small container: linear scan without hashing.
	  __prev_n = _M_find_before_node(__k);
	  if (!__prev_n)
	    return 0;

	  // We found a matching node, erase it.
	  __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
	  __bkt = _M_bucket_index(*__n);
	}
      else
	{
	  __hash_code __code = this->_M_hash_code(__k);
	  __bkt = _M_bucket_index(__code);

	  // Look for the node before the first matching node.
	  __prev_n = _M_find_before_node(__bkt, __k, __code);
	  if (!__prev_n)
	    return 0;

	  __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
	}

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 526. Is it undefined if a function in the standard changes
      // in parameters?
      // We use one loop to find all matching nodes and another to deallocate
      // them so that the key stays valid during the first loop. It might be
      // invalidated indirectly when destroying nodes.
      __node_ptr __n_last = __n->_M_next();
      while (__n_last && this->_M_node_equals(*__n, *__n_last))
	__n_last = __n_last->_M_next();

      // Bucket of the first node past the erased run (or __bkt if none).
      std::size_t __n_last_bkt = __n_last ? _M_bucket_index(*__n_last) : __bkt;

      // Deallocate nodes.
      size_type __result = 0;
      do
	{
	  __node_ptr __p = __n->_M_next();
	  this->_M_deallocate_node(__n);
	  __n = __p;
	  ++__result;
	}
      while (__n != __n_last);

      _M_element_count -= __result;
      if (__prev_n == _M_buckets[__bkt])
	// The erased run started at the bucket's beginning.
	_M_remove_bucket_begin(__bkt, __n_last, __n_last_bkt);
      else if (__n_last_bkt != __bkt)
	// The erased run ended the bucket: next bucket's before-begin node
	// is now __prev_n.
	_M_buckets[__n_last_bkt] = __prev_n;
      __prev_n->_M_nxt = __n_last;
      return __result;
    }
2458
  // Erase the elements in [__first, __last), deallocating bucket by bucket
  // and repairing each bucket's before-begin pointer as the range is
  // consumed.  Returns an iterator to __last's element.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    erase(const_iterator __first, const_iterator __last)
    -> iterator
    {
      __node_ptr __n = __first._M_cur;
      __node_ptr __last_n = __last._M_cur;
      if (__n == __last_n)
	// Empty range: nothing to do.
	return iterator(__n);

      std::size_t __bkt = _M_bucket_index(*__n);

      // Node preceding the range; its _M_nxt is relinked at the end.
      __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
      // Whether the range starts at the beginning of its first bucket
      // (always true for every subsequent bucket).
      bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
      std::size_t __n_bkt = __bkt;
      for (;;)
	{
	  // Deallocate the nodes of the current bucket that are in range.
	  do
	    {
	      __node_ptr __tmp = __n;
	      __n = __n->_M_next();
	      this->_M_deallocate_node(__tmp);
	      --_M_element_count;
	      if (!__n)
		break;
	      __n_bkt = _M_bucket_index(*__n);
	    }
	  while (__n != __last_n && __n_bkt == __bkt);
	  if (__is_bucket_begin)
	    _M_remove_bucket_begin(__bkt, __n, __n_bkt);
	  if (__n == __last_n)
	    break;
	  // Move on to the next bucket; the range now starts at its begin.
	  __is_bucket_begin = true;
	  __bkt = __n_bkt;
	}

      // If erasure stopped mid-list, the node after the range may now be
      // the first of its bucket and must point back before __prev_n.
      if (__n && (__n_bkt != __bkt || __is_bucket_begin))
	_M_buckets[__n_bkt] = __prev_n;
      __prev_n->_M_nxt = __n;
      return iterator(__n);
    }
2505
2506 template<typename _Key, typename _Value, typename _Alloc,
2507 typename _ExtractKey, typename _Equal,
2508 typename _Hash, typename _RangeHash, typename _Unused,
2509 typename _RehashPolicy, typename _Traits>
2510 void
2511 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2512 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2513 clear() noexcept
2514 {
2515 this->_M_deallocate_nodes(_M_begin());
2516 __builtin_memset(_M_buckets, 0,
2517 _M_bucket_count * sizeof(__node_base_ptr));
2518 _M_element_count = 0;
2519 _M_before_begin._M_nxt = nullptr;
2520 }
2521
2522 template<typename _Key, typename _Value, typename _Alloc,
2523 typename _ExtractKey, typename _Equal,
2524 typename _Hash, typename _RangeHash, typename _Unused,
2525 typename _RehashPolicy, typename _Traits>
2526 void
2527 _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2528 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2529 rehash(size_type __bkt_count)
2530 {
2531 const __rehash_state& __saved_state = _M_rehash_policy._M_state();
2532 __bkt_count
2533 = std::max(_M_rehash_policy._M_bkt_for_elements(_M_element_count + 1),
2534 __bkt_count);
2535 __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count);
2536
2537 if (__bkt_count != _M_bucket_count)
2538 _M_rehash(__bkt_count, __saved_state);
2539 else
2540 // No rehash, restore previous state to keep it consistent with
2541 // container state.
2542 _M_rehash_policy._M_reset(__saved_state);
2543 }
2544
  // Rehash to __bkt_count buckets, dispatching on __unique_keys.  On
  // failure (bucket allocation) the rehash policy is rolled back to
  // __state before the exception is propagated.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_rehash(size_type __bkt_count, const __rehash_state& __state)
    {
      __try
	{
	  _M_rehash_aux(__bkt_count, __unique_keys{});
	}
      __catch(...)
	{
	  // A failure here means that buckets allocation failed. We only
	  // have to restore hash policy previous state.
	  _M_rehash_policy._M_reset(__state);
	  __throw_exception_again;
	}
    }
2566
  // Rehash when there is no equivalent elements.
  //
  // Relinks every node into a freshly allocated bucket array of
  // __bkt_count buckets.  With unique keys there is no ordering to
  // preserve, so each node is simply pushed at the front of its new
  // bucket (or of the whole list when the bucket was empty).
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_rehash_aux(size_type __bkt_count, true_type /* __uks */)
    {
      __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
      __node_ptr __p = _M_begin();
      _M_before_begin._M_nxt = nullptr;
      // Bucket currently holding the list's first node (whose bucket entry
      // is &_M_before_begin and must be patched when a new first node is
      // linked in front of it).
      std::size_t __bbegin_bkt = 0;
      while (__p)
	{
	  __node_ptr __next = __p->_M_next();
	  std::size_t __bkt
	    = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
	  if (!__new_buckets[__bkt])
	    {
	      // First node of this bucket: push at the front of the list.
	      __p->_M_nxt = _M_before_begin._M_nxt;
	      _M_before_begin._M_nxt = __p;
	      __new_buckets[__bkt] = &_M_before_begin;
	      if (__p->_M_nxt)
		// The previous first bucket now starts after __p.
		__new_buckets[__bbegin_bkt] = __p;
	      __bbegin_bkt = __bkt;
	    }
	  else
	    {
	      // Bucket already populated: insert right after its
	      // before-begin node.
	      __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
	      __new_buckets[__bkt]->_M_nxt = __p;
	    }

	  __p = __next;
	}

      _M_deallocate_buckets();
      _M_bucket_count = __bkt_count;
      _M_buckets = __new_buckets;
    }
2608
  // Rehash when there can be equivalent elements, preserve their relative
  // order.
  //
  // Same relinking as the unique-keys overload, but runs of equivalent
  // elements (which are adjacent before the rehash and hash to the same
  // bucket) are kept in their original relative order by inserting each
  // node after the previously moved one when both land in the same bucket.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_rehash_aux(size_type __bkt_count, false_type /* __uks */)
    {
      __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
      __node_ptr __p = _M_begin();
      _M_before_begin._M_nxt = nullptr;
      // Bucket currently holding the list's first node (see unique-keys
      // overload).
      std::size_t __bbegin_bkt = 0;
      // Bucket and node of the previously relinked element.
      std::size_t __prev_bkt = 0;
      __node_ptr __prev_p = nullptr;
      bool __check_bucket = false;

      while (__p)
	{
	  __node_ptr __next = __p->_M_next();
	  std::size_t __bkt
	    = __hash_code_base::_M_bucket_index(*__p, __bkt_count);

	  if (__prev_p && __prev_bkt == __bkt)
	    {
	      // Previous insert was already in this bucket, we insert after
	      // the previously inserted one to preserve equivalent elements
	      // relative order.
	      __p->_M_nxt = __prev_p->_M_nxt;
	      __prev_p->_M_nxt = __p;

	      // Inserting after a node in a bucket require to check that we
	      // haven't change the bucket last node, in this case next
	      // bucket containing its before begin node must be updated. We
	      // schedule a check as soon as we move out of the sequence of
	      // equivalent nodes to limit the number of checks.
	      __check_bucket = true;
	    }
	  else
	    {
	      if (__check_bucket)
		{
		  // Check if we shall update the next bucket because of
		  // insertions into __prev_bkt bucket.
		  if (__prev_p->_M_nxt)
		    {
		      std::size_t __next_bkt
			= __hash_code_base::_M_bucket_index(
			  *__prev_p->_M_next(), __bkt_count);
		      if (__next_bkt != __prev_bkt)
			__new_buckets[__next_bkt] = __prev_p;
		    }
		  __check_bucket = false;
		}

	      if (!__new_buckets[__bkt])
		{
		  // First node of this bucket: push at the front of the
		  // list.
		  __p->_M_nxt = _M_before_begin._M_nxt;
		  _M_before_begin._M_nxt = __p;
		  __new_buckets[__bkt] = &_M_before_begin;
		  if (__p->_M_nxt)
		    // The previous first bucket now starts after __p.
		    __new_buckets[__bbegin_bkt] = __p;
		  __bbegin_bkt = __bkt;
		}
	      else
		{
		  // Bucket already populated: insert right after its
		  // before-begin node.
		  __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
		  __new_buckets[__bkt]->_M_nxt = __p;
		}
	    }
	  __prev_p = __p;
	  __prev_bkt = __bkt;
	  __p = __next;
	}

      // A check may still be pending for the last relinked run.
      if (__check_bucket && __prev_p->_M_nxt)
	{
	  std::size_t __next_bkt
	    = __hash_code_base::_M_bucket_index(*__prev_p->_M_next(),
						__bkt_count);
	  if (__next_bkt != __prev_bkt)
	    __new_buckets[__next_bkt] = __prev_p;
	}

      _M_deallocate_buckets();
      _M_bucket_count = __bkt_count;
      _M_buckets = __new_buckets;
    }
2699
2700 #if __cplusplus > 201402L
  // Primary template is intentionally empty; presumably specialized
  // elsewhere to support the C++17 merge members of the unordered
  // containers — confirm against the container headers.
  template<typename, typename, typename> class _Hash_merge_helper { };
2702 #endif // C++17
2703
2704 #if __cpp_deduction_guides >= 201606
  // Used to constrain deduction guides: well-formed only when _Hash is
  // neither an integral type (which would be a bucket-count argument) nor
  // an allocator, so such arguments are never deduced as the hash functor.
  template<typename _Hash>
    using _RequireNotAllocatorOrIntegral
      = __enable_if_t<!__or_<is_integral<_Hash>, __is_allocator<_Hash>>::value>;
2709 #endif
2710
2711 /// @endcond
2712 _GLIBCXX_END_NAMESPACE_VERSION
2713 } // namespace std
2714
2715 #endif // _HASHTABLE_H
2716