// hashtable.h header -*- C++ -*-

// Copyright (C) 2007-2020 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/hashtable.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{unordered_map, unordered_set}
 */

#ifndef _HASHTABLE_H
#define _HASHTABLE_H 1

#pragma GCC system_header

#include <bits/hashtable_policy.h>
#if __cplusplus > 201402L
# include <bits/node_handle.h>
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  template<typename _Tp, typename _Hash>
    using __cache_default
      = __not_<__and_<// Do not cache for fast hasher.
                      __is_fast_hash<_Hash>,
                      // Mandatory to have erase not throwing.
                      __is_nothrow_invocable<const _Hash&, const _Tp&>>>;

  /**
   *  Primary class template _Hashtable.
   *
   *  @ingroup hashtable-detail
   *
   *  @tparam _Value  CopyConstructible type.
   *
   *  @tparam _Key  CopyConstructible type.
   *
   *  @tparam _Alloc  An allocator type
   *  ([lib.allocator.requirements]) whose _Alloc::value_type is
   *  _Value.  As a conforming extension, we allow for
   *  _Alloc::value_type != _Value.
   *
   *  @tparam _ExtractKey  Function object that takes an object of type
   *  _Value and returns a value of type _Key.
   *
   *  @tparam _Equal  Function object that takes two objects of type
   *  _Key and returns a bool-like value that is true if the two objects
   *  are considered equal.
   *
   *  @tparam _H1  The hash function.  A unary function object with
   *  argument type _Key and result type size_t.  Return values should
   *  be distributed over the entire range [0, numeric_limits<size_t>::max()].
   *
   *  @tparam _H2  The range-hashing function (in the terminology of
   *  Tavori and Dreizin).  A binary function object whose argument
   *  types and result type are all size_t.  Given arguments r and N,
   *  the return value is in the range [0, N).
   *
   *  @tparam _Hash  The ranged hash function (Tavori and Dreizin).  A
   *  binary function whose argument types are _Key and size_t and
   *  whose result type is size_t.  Given arguments k and N, the
   *  return value is in the range [0, N).  Default: hash(k, N) =
   *  h2(h1(k), N).  If _Hash is anything other than the default, _H1
   *  and _H2 are ignored.
   *
   *  @tparam _RehashPolicy  Policy class with three members, all of
   *  which govern the bucket count.  _M_next_bkt(n) returns a bucket
   *  count no smaller than n.
   *  _M_bkt_for_elements(n) returns a bucket count appropriate for an
   *  element count of n.  _M_need_rehash(n_bkt, n_elt, n_ins) determines
   *  whether, if the current bucket count is n_bkt and the current
   *  element count is n_elt, we need to increase the bucket count.
   *  If so, returns make_pair(true, n), where n is the new bucket count.
   *  If not, returns make_pair(false, <anything>)
   *
   *  @tparam _Traits  Compile-time class with three boolean
   *  std::integral_constant members:  __cache_hash_code, __constant_iterators,
   *  __unique_keys.
   *
   *  Each _Hashtable data structure has:
   *
   *  - _Bucket[]       _M_buckets
   *  - _Hash_node_base _M_before_begin
   *  - size_type       _M_bucket_count
   *  - size_type       _M_element_count
   *
   *  with _Bucket being _Hash_node* and _Hash_node containing:
   *
   *  - _Hash_node*  _M_next
   *  - Tp           _M_value
   *  - size_t       _M_hash_code if cache_hash_code is true
   *
   *  In terms of Standard containers the hashtable is like the aggregation
   *  of:
   *
   *  - std::forward_list<_Node> containing the elements
   *  - std::vector<std::forward_list<_Node>::iterator> representing the
   *    buckets
   *
   *  The non-empty buckets contain the node before the first node in the
   *  bucket.  This design makes it possible to implement something like a
   *  std::forward_list::insert_after on container insertion and
   *  std::forward_list::erase_after on container erase
   *  calls.  _M_before_begin is equivalent to
   *  std::forward_list::before_begin.  Empty buckets contain
   *  nullptr.  Note that one of the non-empty buckets contains
   *  &_M_before_begin which is not a dereferenceable node so the
   *  node pointer in a bucket shall never be dereferenced, only its
   *  next node can be.
   *
   *  Walking through a bucket's nodes requires a check on the hash code to
   *  see if each node is still in the bucket.  Such a design assumes a
   *  quite efficient hash functor and is one of the reasons it is
   *  highly advisable to set __cache_hash_code to true.
   *
   *  The container iterators are simply built from nodes.  This way
   *  incrementing the iterator is perfectly efficient independent of
   *  how many empty buckets there are in the container.
   *
   *  On insert we compute the element's hash code and use it to find the
   *  bucket index.  If the element must be inserted in an empty bucket
   *  we add it at the beginning of the singly linked list and make the
   *  bucket point to _M_before_begin.  The bucket that used to point to
   *  _M_before_begin, if any, is updated to point to its new before
   *  begin node.
   *
   *  On erase, the simple iterator design requires using the hash
   *  functor to get the index of the bucket to update.  For this
   *  reason, when __cache_hash_code is set to false the hash functor must
   *  not throw and this is enforced by a static assertion.
   *
   *  Functionality is implemented by decomposition into base classes,
   *  where the derived _Hashtable class is used in _Map_base,
   *  _Insert, _Rehash_base, and _Equality base classes to access the
   *  "this" pointer.  _Hashtable_base is used in the base classes as a
   *  non-recursive, fully-completed-type so that detailed nested type
   *  information, such as iterator type and node type, can be
   *  used.  This is similar to the "Curiously Recurring Template
   *  Pattern" (CRTP) technique, but uses a reconstructed, not
   *  explicitly passed, template pattern.
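   *
   *  To illustrate the bucket layout described above (illustration only,
   *  not part of the implementation): with 3 buckets, an identity hash and
   *  the keys 1 and 2 inserted in that order, the table looks like:
   *
   *    _M_before_begin -> node(2) -> node(1) -> null
   *    _M_buckets[0] = nullptr            (empty bucket)
   *    _M_buckets[1] = &node(2)           (node before bucket 1's first node)
   *    _M_buckets[2] = &_M_before_begin   (bucket 2 holds the list head)
   *
   *  Inserting in or erasing from bucket 1 can then proceed through
   *  forward_list-style insert-after/erase-after operations starting from
   *  _M_buckets[1].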
   *
   *  Base class templates are:
   *    - __detail::_Hashtable_base
   *    - __detail::_Map_base
   *    - __detail::_Insert
   *    - __detail::_Rehash_base
   *    - __detail::_Equality
   */
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash,
           typename _RehashPolicy, typename _Traits>
    class _Hashtable
    : public __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
                                       _H1, _H2, _Hash, _Traits>,
      public __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                                 _H1, _H2, _Hash, _RehashPolicy, _Traits>,
      public __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                               _H1, _H2, _Hash, _RehashPolicy, _Traits>,
      public __detail::_Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                                    _H1, _H2, _Hash, _RehashPolicy, _Traits>,
      public __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                                 _H1, _H2, _Hash, _RehashPolicy, _Traits>,
      private __detail::_Hashtable_alloc<
        __alloc_rebind<_Alloc,
                       __detail::_Hash_node<_Value,
                                            _Traits::__hash_cached::value>>>
    {
      static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
          "unordered container must have a non-const, non-volatile value_type");
#if __cplusplus > 201703L || defined __STRICT_ANSI__
      static_assert(is_same<typename _Alloc::value_type, _Value>{},
          "unordered container must have the same value_type as its allocator");
#endif

      using __traits_type = _Traits;
      using __hash_cached = typename __traits_type::__hash_cached;
      using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
      using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;

      using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;

      using __value_alloc_traits =
        typename __hashtable_alloc::__value_alloc_traits;
      using __node_alloc_traits =
        typename __hashtable_alloc::__node_alloc_traits;
      using __node_base = typename __hashtable_alloc::__node_base;
      using __bucket_type = typename __hashtable_alloc::__bucket_type;

    public:
      typedef _Key                        key_type;
      typedef _Value                      value_type;
      typedef _Alloc                      allocator_type;
      typedef _Equal                      key_equal;

      // mapped_type, if present, comes from _Map_base.
      // hasher, if present, comes from _Hash_code_base/_Hashtable_base.
      typedef typename __value_alloc_traits::pointer        pointer;
      typedef typename __value_alloc_traits::const_pointer  const_pointer;
      typedef value_type&                                    reference;
      typedef const value_type&                              const_reference;

    private:
      using __rehash_type = _RehashPolicy;
      using __rehash_state = typename __rehash_type::_State;

      using __constant_iterators = typename __traits_type::__constant_iterators;
      using __unique_keys = typename __traits_type::__unique_keys;

      using __key_extract = typename std::conditional<
                                             __constant_iterators::value,
                                             __detail::_Identity,
                                             __detail::_Select1st>::type;

      using __hashtable_base = __detail::
                               _Hashtable_base<_Key, _Value, _ExtractKey,
                                               _Equal, _H1, _H2, _Hash, _Traits>;

      using __hash_code_base = typename __hashtable_base::__hash_code_base;
      using __hash_code = typename __hashtable_base::__hash_code;
      using __ireturn_type = typename __hashtable_base::__ireturn_type;

      using __map_base = __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey,
                                             _Equal, _H1, _H2, _Hash,
                                             _RehashPolicy, _Traits>;

      using __rehash_base = __detail::_Rehash_base<_Key, _Value, _Alloc,
                                                   _ExtractKey, _Equal,
                                                   _H1, _H2, _Hash,
                                                   _RehashPolicy, _Traits>;

      using __eq_base = __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey,
                                            _Equal, _H1, _H2, _Hash,
                                            _RehashPolicy, _Traits>;

      using __reuse_or_alloc_node_gen_t =
        __detail::_ReuseOrAllocNode<__node_alloc_type>;
      using __alloc_node_gen_t =
        __detail::_AllocNode<__node_alloc_type>;

      // Simple RAII type for managing a node containing an element
      struct _Scoped_node
      {
        // Take ownership of a node with a constructed element.
        _Scoped_node(__node_type* __n, __hashtable_alloc* __h)
        : _M_h(__h), _M_node(__n) { }

        // Allocate a node and construct an element within it.
        template<typename... _Args>
          _Scoped_node(__hashtable_alloc* __h, _Args&&... __args)
          : _M_h(__h),
            _M_node(__h->_M_allocate_node(std::forward<_Args>(__args)...))
          { }

        // Destroy element and deallocate node.
        ~_Scoped_node() { if (_M_node) _M_h->_M_deallocate_node(_M_node); };

        _Scoped_node(const _Scoped_node&) = delete;
        _Scoped_node& operator=(const _Scoped_node&) = delete;

        __hashtable_alloc* _M_h;
        __node_type* _M_node;
      };

      template<typename _Ht>
        static constexpr
        typename conditional<std::is_lvalue_reference<_Ht>::value,
                             const value_type&, value_type&&>::type
        __fwd_value_for(value_type& __val) noexcept
        { return std::move(__val); }

      // Metaprogramming for picking apart hash caching.
      template<typename _Cond>
        using __if_hash_cached = __or_<__not_<__hash_cached>, _Cond>;

      template<typename _Cond>
        using __if_hash_not_cached = __or_<__hash_cached, _Cond>;

      // Compile-time diagnostics.

      // _Hash_code_base has everything protected, so use this derived type to
      // access it.
      struct __hash_code_base_access : __hash_code_base
      { using __hash_code_base::_M_bucket_index; };

      // Getting a bucket index from a node shall not throw because it is used
      // in methods (erase, swap...) that shall not throw.
      static_assert(noexcept(declval<const __hash_code_base_access&>()
                             ._M_bucket_index((const __node_type*)nullptr,
                                              (std::size_t)0)),
                    "Cache the hash code or qualify your functors involved"
                    " in hash code and bucket index computation with noexcept");

      // When hash codes are cached local iterator inherits from H2 functor
      // which must then be default constructible.
      static_assert(__if_hash_cached<is_default_constructible<_H2>>::value,
                    "Functor used to map hash code to bucket index"
                    " must be default constructible");

      template<typename _Keya, typename _Valuea, typename _Alloca,
               typename _ExtractKeya, typename _Equala,
               typename _H1a, typename _H2a, typename _Hasha,
               typename _RehashPolicya, typename _Traitsa,
               bool _Unique_keysa>
        friend struct __detail::_Map_base;

      template<typename _Keya, typename _Valuea, typename _Alloca,
               typename _ExtractKeya, typename _Equala,
               typename _H1a, typename _H2a, typename _Hasha,
               typename _RehashPolicya, typename _Traitsa>
        friend struct __detail::_Insert_base;

      template<typename _Keya, typename _Valuea, typename _Alloca,
               typename _ExtractKeya, typename _Equala,
               typename _H1a, typename _H2a, typename _Hasha,
               typename _RehashPolicya, typename _Traitsa,
               bool _Constant_iteratorsa>
        friend struct __detail::_Insert;

      template<typename _Keya, typename _Valuea, typename _Alloca,
               typename _ExtractKeya, typename _Equala,
               typename _H1a, typename _H2a, typename _Hasha,
               typename _RehashPolicya, typename _Traitsa,
               bool _Unique_keysa>
        friend struct __detail::_Equality;

    public:
      using size_type = typename __hashtable_base::size_type;
      using difference_type = typename __hashtable_base::difference_type;

      using iterator = typename __hashtable_base::iterator;
      using const_iterator = typename __hashtable_base::const_iterator;

      using local_iterator = typename __hashtable_base::local_iterator;
      using const_local_iterator = typename __hashtable_base::
                                   const_local_iterator;

#if __cplusplus > 201402L
      using node_type = _Node_handle<_Key, _Value, __node_alloc_type>;
      using insert_return_type = _Node_insert_return<iterator, node_type>;
#endif

    private:
      __bucket_type*    _M_buckets       = &_M_single_bucket;
      size_type         _M_bucket_count  = 1;
      __node_base       _M_before_begin;
      size_type         _M_element_count = 0;
      _RehashPolicy     _M_rehash_policy;

      // A single bucket used when only one bucket is needed.  This is
      // especially useful for move semantics: the moved-from hashtable is
      // left with a single bucket which is not allocated, so that those
      // operations can be noexcept qualified.
      // Note that we can't leave the hashtable with 0 buckets without adding
      // numerous checks in the code to avoid a modulus by 0.
      __bucket_type     _M_single_bucket = nullptr;

      bool
      _M_uses_single_bucket(__bucket_type* __bkts) const
      { return __builtin_expect(__bkts == &_M_single_bucket, false); }

      bool
      _M_uses_single_bucket() const
      { return _M_uses_single_bucket(_M_buckets); }

      __hashtable_alloc&
      _M_base_alloc() { return *this; }

      __bucket_type*
      _M_allocate_buckets(size_type __bkt_count)
      {
        if (__builtin_expect(__bkt_count == 1, false))
          {
            _M_single_bucket = nullptr;
            return &_M_single_bucket;
          }

        return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
      }

      void
      _M_deallocate_buckets(__bucket_type* __bkts, size_type __bkt_count)
      {
        if (_M_uses_single_bucket(__bkts))
          return;

        __hashtable_alloc::_M_deallocate_buckets(__bkts, __bkt_count);
      }

      void
      _M_deallocate_buckets()
      { _M_deallocate_buckets(_M_buckets, _M_bucket_count); }

      // Gets bucket begin, deals with the fact that non-empty buckets contain
      // their before begin node.
      __node_type*
      _M_bucket_begin(size_type __bkt) const;

      __node_type*
      _M_begin() const
      { return static_cast<__node_type*>(_M_before_begin._M_nxt); }

      // Assign *this using another _Hashtable instance.  Whether elements
      // are copied or moved depends on the _Ht reference.
      template<typename _Ht>
        void
        _M_assign_elements(_Ht&&);

      template<typename _Ht, typename _NodeGenerator>
        void
        _M_assign(_Ht&&, const _NodeGenerator&);

      void
      _M_move_assign(_Hashtable&&, true_type);

      void
      _M_move_assign(_Hashtable&&, false_type);

      void
      _M_reset() noexcept;

      _Hashtable(const _H1& __h1, const _H2& __h2, const _Hash& __h,
                 const _Equal& __eq, const _ExtractKey& __exk,
                 const allocator_type& __a)
      : __hashtable_base(__exk, __h1, __h2, __h, __eq),
        __hashtable_alloc(__node_alloc_type(__a))
      { }

    public:
      // Constructor, destructor, assignment, swap
      _Hashtable() = default;
      _Hashtable(size_type __bkt_count_hint,
                 const _H1&, const _H2&, const _Hash&,
                 const _Equal&, const _ExtractKey&,
                 const allocator_type&);

      template<typename _InputIterator>
        _Hashtable(_InputIterator __first, _InputIterator __last,
                   size_type __bkt_count_hint,
                   const _H1&, const _H2&, const _Hash&,
                   const _Equal&, const _ExtractKey&,
                   const allocator_type&);

      _Hashtable(const _Hashtable&);

      _Hashtable(_Hashtable&&) noexcept;

      _Hashtable(const _Hashtable&, const allocator_type&);

      _Hashtable(_Hashtable&&, const allocator_type&);

      // Use delegating constructors.
      explicit
      _Hashtable(const allocator_type& __a)
      : __hashtable_alloc(__node_alloc_type(__a))
      { }

      explicit
      _Hashtable(size_type __bkt_count_hint,
                 const _H1& __hf = _H1(),
                 const key_equal& __eql = key_equal(),
                 const allocator_type& __a = allocator_type())
      : _Hashtable(__bkt_count_hint, __hf, _H2(), _Hash(), __eql,
                   __key_extract(), __a)
      { }

      template<typename _InputIterator>
        _Hashtable(_InputIterator __f, _InputIterator __l,
                   size_type __bkt_count_hint = 0,
                   const _H1& __hf = _H1(),
                   const key_equal& __eql = key_equal(),
                   const allocator_type& __a = allocator_type())
        : _Hashtable(__f, __l, __bkt_count_hint, __hf, _H2(), _Hash(), __eql,
                     __key_extract(), __a)
        { }

      _Hashtable(initializer_list<value_type> __l,
                 size_type __bkt_count_hint = 0,
                 const _H1& __hf = _H1(),
                 const key_equal& __eql = key_equal(),
                 const allocator_type& __a = allocator_type())
      : _Hashtable(__l.begin(), __l.end(), __bkt_count_hint,
                   __hf, _H2(), _Hash(), __eql,
                   __key_extract(), __a)
      { }

      _Hashtable&
      operator=(const _Hashtable& __ht);

      _Hashtable&
      operator=(_Hashtable&& __ht)
      noexcept(__node_alloc_traits::_S_nothrow_move()
               && is_nothrow_move_assignable<_H1>::value
               && is_nothrow_move_assignable<_Equal>::value)
      {
        constexpr bool __move_storage =
          __node_alloc_traits::_S_propagate_on_move_assign()
          || __node_alloc_traits::_S_always_equal();
        _M_move_assign(std::move(__ht), __bool_constant<__move_storage>());
        return *this;
      }

      _Hashtable&
      operator=(initializer_list<value_type> __l)
      {
        __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
        _M_before_begin._M_nxt = nullptr;
        clear();
        this->_M_insert_range(__l.begin(), __l.end(), __roan, __unique_keys());
        return *this;
      }

      ~_Hashtable() noexcept;

      void
      swap(_Hashtable&)
      noexcept(__and_<__is_nothrow_swappable<_H1>,
                      __is_nothrow_swappable<_Equal>>::value);

      // Basic container operations
      iterator
      begin() noexcept
      { return iterator(_M_begin()); }

      const_iterator
      begin() const noexcept
      { return const_iterator(_M_begin()); }

      iterator
      end() noexcept
      { return iterator(nullptr); }

      const_iterator
      end() const noexcept
      { return const_iterator(nullptr); }

      const_iterator
      cbegin() const noexcept
      { return const_iterator(_M_begin()); }

      const_iterator
      cend() const noexcept
      { return const_iterator(nullptr); }

      size_type
      size() const noexcept
      { return _M_element_count; }

      _GLIBCXX_NODISCARD bool
      empty() const noexcept
      { return size() == 0; }

      allocator_type
      get_allocator() const noexcept
      { return allocator_type(this->_M_node_allocator()); }

      size_type
      max_size() const noexcept
      { return __node_alloc_traits::max_size(this->_M_node_allocator()); }

      // Observers
      key_equal
      key_eq() const
      { return this->_M_eq(); }

      // hash_function, if present, comes from _Hash_code_base.

      // Bucket operations
      size_type
      bucket_count() const noexcept
      { return _M_bucket_count; }

      size_type
      max_bucket_count() const noexcept
      { return max_size(); }

      size_type
      bucket_size(size_type __bkt) const
      { return std::distance(begin(__bkt), end(__bkt)); }

      size_type
      bucket(const key_type& __k) const
      { return _M_bucket_index(__k, this->_M_hash_code(__k)); }

      local_iterator
      begin(size_type __bkt)
      {
        return local_iterator(*this, _M_bucket_begin(__bkt),
                              __bkt, _M_bucket_count);
      }

      local_iterator
      end(size_type __bkt)
      { return local_iterator(*this, nullptr, __bkt, _M_bucket_count); }

      const_local_iterator
      begin(size_type __bkt) const
      {
        return const_local_iterator(*this, _M_bucket_begin(__bkt),
                                    __bkt, _M_bucket_count);
      }

      const_local_iterator
      end(size_type __bkt) const
      { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }

      // DR 691.
      const_local_iterator
      cbegin(size_type __bkt) const
      {
        return const_local_iterator(*this, _M_bucket_begin(__bkt),
                                    __bkt, _M_bucket_count);
      }

      const_local_iterator
      cend(size_type __bkt) const
      { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }

      float
      load_factor() const noexcept
      {
        return static_cast<float>(size()) / static_cast<float>(bucket_count());
      }

      // max_load_factor, if present, comes from _Rehash_base.

      // Generalization of max_load_factor.  Extension, not found in
      // TR1.  Only useful if _RehashPolicy is something other than
      // the default.
      const _RehashPolicy&
      __rehash_policy() const
      { return _M_rehash_policy; }

      void
      __rehash_policy(const _RehashPolicy& __pol)
      { _M_rehash_policy = __pol; }

      // Lookup.
      iterator
      find(const key_type& __k);

      const_iterator
      find(const key_type& __k) const;

      size_type
      count(const key_type& __k) const;

      std::pair<iterator, iterator>
      equal_range(const key_type& __k);

      std::pair<const_iterator, const_iterator>
      equal_range(const key_type& __k) const;

    protected:
      // Bucket index computation helpers.
      size_type
      _M_bucket_index(__node_type* __n) const noexcept
      { return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }

      size_type
      _M_bucket_index(const key_type& __k, __hash_code __c) const
      { return __hash_code_base::_M_bucket_index(__k, __c, _M_bucket_count); }

      // Find and insert helper functions and types
      // Find the node before the one matching the criteria.
      __node_base*
      _M_find_before_node(size_type, const key_type&, __hash_code) const;

      __node_type*
      _M_find_node(size_type __bkt, const key_type& __key,
                   __hash_code __c) const
      {
        __node_base* __before_n = _M_find_before_node(__bkt, __key, __c);
        if (__before_n)
          return static_cast<__node_type*>(__before_n->_M_nxt);
        return nullptr;
      }

      // Insert a node at the beginning of a bucket.
      void
      _M_insert_bucket_begin(size_type, __node_type*);

      // Remove the bucket first node
      void
      _M_remove_bucket_begin(size_type __bkt, __node_type* __next_n,
                             size_type __next_bkt);

      // Get the node before __n in the bucket __bkt
      __node_base*
      _M_get_previous_node(size_type __bkt, __node_base* __n);

      // Insert node __n with key __k and hash code __code, in bucket __bkt
      // if no rehash (assumes no element with same key already present).
      // Takes ownership of __n if insertion succeeds, throws otherwise.
      iterator
      _M_insert_unique_node(const key_type& __k, size_type __bkt,
                            __hash_code __code, __node_type* __n,
                            size_type __n_elt = 1);

      // Insert node __n with key __k and hash code __code.
      // Takes ownership of __n if insertion succeeds, throws otherwise.
      iterator
      _M_insert_multi_node(__node_type* __hint, const key_type& __k,
                           __hash_code __code, __node_type* __n);

      template<typename... _Args>
        std::pair<iterator, bool>
        _M_emplace(true_type, _Args&&... __args);

      template<typename... _Args>
        iterator
        _M_emplace(false_type __uk, _Args&&... __args)
        { return _M_emplace(cend(), __uk, std::forward<_Args>(__args)...); }

      // Emplace with hint, useless when keys are unique.
      template<typename... _Args>
        iterator
        _M_emplace(const_iterator, true_type __uk, _Args&&... __args)
        { return _M_emplace(__uk, std::forward<_Args>(__args)...).first; }

      template<typename... _Args>
        iterator
        _M_emplace(const_iterator, false_type, _Args&&... __args);

      template<typename _Arg, typename _NodeGenerator>
        std::pair<iterator, bool>
        _M_insert(_Arg&&, const _NodeGenerator&, true_type, size_type = 1);

      template<typename _Arg, typename _NodeGenerator>
        iterator
        _M_insert(_Arg&& __arg, const _NodeGenerator& __node_gen,
                  false_type __uk)
        {
          return _M_insert(cend(), std::forward<_Arg>(__arg), __node_gen,
                           __uk);
        }

      // Insert with hint, not used when keys are unique.
      template<typename _Arg, typename _NodeGenerator>
        iterator
        _M_insert(const_iterator, _Arg&& __arg,
                  const _NodeGenerator& __node_gen, true_type __uk)
        {
          return
            _M_insert(std::forward<_Arg>(__arg), __node_gen, __uk).first;
        }

      // Insert with hint when keys are not unique.
      template<typename _Arg, typename _NodeGenerator>
        iterator
        _M_insert(const_iterator, _Arg&&,
                  const _NodeGenerator&, false_type);

      size_type
      _M_erase(true_type, const key_type&);

      size_type
      _M_erase(false_type, const key_type&);

      iterator
      _M_erase(size_type __bkt, __node_base* __prev_n, __node_type* __n);

    public:
      // Emplace
      template<typename... _Args>
        __ireturn_type
        emplace(_Args&&... __args)
        { return _M_emplace(__unique_keys(), std::forward<_Args>(__args)...); }

      template<typename... _Args>
        iterator
        emplace_hint(const_iterator __hint, _Args&&... __args)
        {
          return _M_emplace(__hint, __unique_keys(),
                            std::forward<_Args>(__args)...);
        }

      // Insert member functions via inheritance.

      // Erase
      iterator
      erase(const_iterator);

      // LWG 2059.
      iterator
      erase(iterator __it)
      { return erase(const_iterator(__it)); }

      size_type
      erase(const key_type& __k)
      { return _M_erase(__unique_keys(), __k); }

      iterator
      erase(const_iterator, const_iterator);

      void
      clear() noexcept;

      // Set number of buckets keeping it appropriate for container's number
      // of elements.
      void rehash(size_type __bkt_count);

      // DR 1189.
      // reserve, if present, comes from _Rehash_base.

#if __cplusplus > 201402L
      /// Re-insert an extracted node into a container with unique keys.
      insert_return_type
      _M_reinsert_node(node_type&& __nh)
      {
        insert_return_type __ret;
        if (__nh.empty())
          __ret.position = end();
        else
          {
            __glibcxx_assert(get_allocator() == __nh.get_allocator());

            const key_type& __k = __nh._M_key();
            __hash_code __code = this->_M_hash_code(__k);
            size_type __bkt = _M_bucket_index(__k, __code);
            if (__node_type* __n = _M_find_node(__bkt, __k, __code))
              {
                __ret.node = std::move(__nh);
                __ret.position = iterator(__n);
                __ret.inserted = false;
              }
            else
              {
                __ret.position
                  = _M_insert_unique_node(__k, __bkt, __code, __nh._M_ptr);
                __nh._M_ptr = nullptr;
                __ret.inserted = true;
              }
          }
        return __ret;
      }

      /// Re-insert an extracted node into a container with equivalent keys.
      iterator
      _M_reinsert_node_multi(const_iterator __hint, node_type&& __nh)
      {
        if (__nh.empty())
          return end();

        __glibcxx_assert(get_allocator() == __nh.get_allocator());

        const key_type& __k = __nh._M_key();
        auto __code = this->_M_hash_code(__k);
        auto __ret
          = _M_insert_multi_node(__hint._M_cur, __k, __code, __nh._M_ptr);
        __nh._M_ptr = nullptr;
        return __ret;
      }

    private:
      node_type
      _M_extract_node(size_t __bkt, __node_base* __prev_n)
      {
        __node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);
        if (__prev_n == _M_buckets[__bkt])
          _M_remove_bucket_begin(__bkt, __n->_M_next(),
             __n->_M_nxt ? _M_bucket_index(__n->_M_next()) : 0);
        else if (__n->_M_nxt)
          {
            size_type __next_bkt = _M_bucket_index(__n->_M_next());
            if (__next_bkt != __bkt)
              _M_buckets[__next_bkt] = __prev_n;
          }

        __prev_n->_M_nxt = __n->_M_nxt;
        __n->_M_nxt = nullptr;
        --_M_element_count;
        return { __n, this->_M_node_allocator() };
      }

    public:
      // Extract a node.
      node_type
      extract(const_iterator __pos)
      {
        size_t __bkt = _M_bucket_index(__pos._M_cur);
        return _M_extract_node(__bkt,
                               _M_get_previous_node(__bkt, __pos._M_cur));
      }

      /// Extract a node.
      node_type
      extract(const _Key& __k)
      {
        node_type __nh;
        __hash_code __code = this->_M_hash_code(__k);
        std::size_t __bkt = _M_bucket_index(__k, __code);
        if (__node_base* __prev_node = _M_find_before_node(__bkt, __k, __code))
          __nh = _M_extract_node(__bkt, __prev_node);
        return __nh;
      }

      /// Merge from a compatible container into one with unique keys.
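      /// Elements whose key is already present in *this are left in __src.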
      template<typename _Compatible_Hashtable>
        void
        _M_merge_unique(_Compatible_Hashtable& __src) noexcept
        {
          static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
              node_type>, "Node types are compatible");
          __glibcxx_assert(get_allocator() == __src.get_allocator());

          auto __n_elt = __src.size();
          for (auto __i = __src.begin(), __end = __src.end(); __i != __end;)
            {
              auto __pos = __i++;
              const key_type& __k = this->_M_extract()(*__pos);
              __hash_code __code = this->_M_hash_code(__k);
              size_type __bkt = _M_bucket_index(__k, __code);
              if (_M_find_node(__bkt, __k, __code) == nullptr)
                {
                  auto __nh = __src.extract(__pos);
                  _M_insert_unique_node(__k, __bkt, __code, __nh._M_ptr,
                                        __n_elt);
                  __nh._M_ptr = nullptr;
                  __n_elt = 1;
                }
              else if (__n_elt != 1)
                --__n_elt;
            }
        }

      /// Merge from a compatible container into one with equivalent keys.
      template<typename _Compatible_Hashtable>
        void
        _M_merge_multi(_Compatible_Hashtable& __src) noexcept
        {
          static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
              node_type>, "Node types are compatible");
          __glibcxx_assert(get_allocator() == __src.get_allocator());

          this->reserve(size() + __src.size());
          for (auto __i = __src.begin(), __end = __src.end(); __i != __end;)
            _M_reinsert_node_multi(cend(), __src.extract(__i++));
        }
#endif // C++17

    private:
      // Helper rehash method used when keys are unique.
      void _M_rehash_aux(size_type __bkt_count, true_type);

      // Helper rehash method used when keys can be non-unique.
      void _M_rehash_aux(size_type __bkt_count, false_type);

      // Unconditionally change size of bucket array to n, restore
      // hash policy state to __state on exception.
      void _M_rehash(size_type __bkt_count, const __rehash_state& __state);
    };


  // Definitions of class template _Hashtable's out-of-line member functions.
  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _M_bucket_begin(size_type __bkt) const
    -> __node_type*
    {
      __node_base* __n = _M_buckets[__bkt];
      return __n ?
             static_cast<__node_type*>(__n->_M_nxt) : nullptr;
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _Hashtable(size_type __bkt_count_hint,
               const _H1& __h1, const _H2& __h2, const _Hash& __h,
               const _Equal& __eq, const _ExtractKey& __exk,
               const allocator_type& __a)
    : _Hashtable(__h1, __h2, __h, __eq, __exk, __a)
    {
      auto __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count_hint);
      if (__bkt_count > _M_bucket_count)
        {
          _M_buckets = _M_allocate_buckets(__bkt_count);
          _M_bucket_count = __bkt_count;
        }
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    template<typename _InputIterator>
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _H1, _H2, _Hash, _RehashPolicy, _Traits>::
      _Hashtable(_InputIterator __f, _InputIterator __l,
                 size_type __bkt_count_hint,
                 const _H1& __h1, const _H2& __h2, const _Hash& __h,
                 const _Equal& __eq, const _ExtractKey& __exk,
                 const allocator_type& __a)
      : _Hashtable(__h1, __h2, __h, __eq, __exk, __a)
      {
        auto __nb_elems = __detail::__distance_fw(__f, __l);
        auto __bkt_count =
          _M_rehash_policy._M_next_bkt(
            std::max(_M_rehash_policy._M_bkt_for_elements(__nb_elems),
                     __bkt_count_hint));

        if (__bkt_count > _M_bucket_count)
          {
            _M_buckets = _M_allocate_buckets(__bkt_count);
            _M_bucket_count = __bkt_count;
          }

        for (; __f != __l; ++__f)
          this->insert(*__f);
      }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    operator=(const _Hashtable& __ht)
    -> _Hashtable&
    {
      if (&__ht == this)
        return *this;

      if (__node_alloc_traits::_S_propagate_on_copy_assign())
        {
          auto& __this_alloc = this->_M_node_allocator();
          auto& __that_alloc = __ht._M_node_allocator();
          if (!__node_alloc_traits::_S_always_equal()
              && __this_alloc != __that_alloc)
            {
              // Replacement allocator cannot free existing storage.
              this->_M_deallocate_nodes(_M_begin());
              _M_before_begin._M_nxt = nullptr;
              _M_deallocate_buckets();
              _M_buckets = nullptr;
              std::__alloc_on_copy(__this_alloc, __that_alloc);
              __hashtable_base::operator=(__ht);
              _M_bucket_count = __ht._M_bucket_count;
              _M_element_count = __ht._M_element_count;
              _M_rehash_policy = __ht._M_rehash_policy;
              __alloc_node_gen_t __alloc_node_gen(*this);
              __try
                {
                  _M_assign(__ht, __alloc_node_gen);
                }
              __catch(...)
                {
                  // _M_assign took care of deallocating all memory.  Now we
                  // must make sure this instance remains in a usable state.
                  _M_reset();
                  __throw_exception_again;
                }
              return *this;
            }
          std::__alloc_on_copy(__this_alloc, __that_alloc);
        }

      // Reuse allocated buckets and nodes.
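      // At this point the node allocator of *this is either unchanged or
      // equal to __ht's, so the buckets and nodes already owned by *this
      // can be recycled by _M_assign_elements.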
      _M_assign_elements(__ht);
      return *this;
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    template<typename _Ht>
      void
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _H1, _H2, _Hash, _RehashPolicy, _Traits>::
      _M_assign_elements(_Ht&& __ht)
      {
        __bucket_type* __former_buckets = nullptr;
        std::size_t __former_bucket_count = _M_bucket_count;
        const __rehash_state& __former_state = _M_rehash_policy._M_state();

        if (_M_bucket_count != __ht._M_bucket_count)
          {
            __former_buckets = _M_buckets;
            _M_buckets = _M_allocate_buckets(__ht._M_bucket_count);
            _M_bucket_count = __ht._M_bucket_count;
          }
        else
          __builtin_memset(_M_buckets, 0,
                           _M_bucket_count * sizeof(__bucket_type));

        __try
          {
            __hashtable_base::operator=(std::forward<_Ht>(__ht));
            _M_element_count = __ht._M_element_count;
            _M_rehash_policy = __ht._M_rehash_policy;
            __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
            _M_before_begin._M_nxt = nullptr;
            _M_assign(std::forward<_Ht>(__ht), __roan);
            if (__former_buckets)
              _M_deallocate_buckets(__former_buckets, __former_bucket_count);
          }
        __catch(...)
          {
            if (__former_buckets)
              {
                // Restore previous buckets.
                _M_deallocate_buckets();
                _M_rehash_policy._M_reset(__former_state);
                _M_buckets = __former_buckets;
                _M_bucket_count = __former_bucket_count;
              }
            __builtin_memset(_M_buckets, 0,
                             _M_bucket_count * sizeof(__bucket_type));
            __throw_exception_again;
          }
      }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    template<typename _Ht, typename _NodeGenerator>
      void
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _H1, _H2, _Hash, _RehashPolicy, _Traits>::
      _M_assign(_Ht&& __ht, const _NodeGenerator& __node_gen)
      {
        __bucket_type* __buckets = nullptr;
        if (!_M_buckets)
          _M_buckets = __buckets = _M_allocate_buckets(_M_bucket_count);

        __try
          {
            if (!__ht._M_before_begin._M_nxt)
              return;

            // First deal with the special first node pointed to by
            // _M_before_begin.
            __node_type* __ht_n = __ht._M_begin();
            __node_type* __this_n
              = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
            this->_M_copy_code(__this_n, __ht_n);
            _M_before_begin._M_nxt = __this_n;
            _M_buckets[_M_bucket_index(__this_n)] = &_M_before_begin;

            // Then deal with other nodes.
            __node_base* __prev_n = __this_n;
            for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
              {
                __this_n = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
                __prev_n->_M_nxt = __this_n;
                this->_M_copy_code(__this_n, __ht_n);
                size_type __bkt = _M_bucket_index(__this_n);
                if (!_M_buckets[__bkt])
                  _M_buckets[__bkt] = __prev_n;
                __prev_n = __this_n;
              }
          }
        __catch(...)
          {
            clear();
            if (__buckets)
              _M_deallocate_buckets();
            __throw_exception_again;
          }
      }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _M_reset() noexcept
    {
      _M_rehash_policy._M_reset();
      _M_bucket_count = 1;
      _M_single_bucket = nullptr;
      _M_buckets = &_M_single_bucket;
      _M_before_begin._M_nxt = nullptr;
      _M_element_count = 0;
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _M_move_assign(_Hashtable&& __ht, true_type)
    {
      this->_M_deallocate_nodes(_M_begin());
      _M_deallocate_buckets();
      __hashtable_base::operator=(std::move(__ht));
      _M_rehash_policy = __ht._M_rehash_policy;
      if (!__ht._M_uses_single_bucket())
        _M_buckets = __ht._M_buckets;
      else
        {
          _M_buckets = &_M_single_bucket;
          _M_single_bucket = __ht._M_single_bucket;
        }
      _M_bucket_count = __ht._M_bucket_count;
      _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
      _M_element_count = __ht._M_element_count;
      std::__alloc_on_move(this->_M_node_allocator(), __ht._M_node_allocator());

      // Fix buckets containing the _M_before_begin pointers that can't be
      // moved.
      if (_M_begin())
        _M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin;
      __ht._M_reset();
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _M_move_assign(_Hashtable&& __ht, false_type)
    {
      if (__ht._M_node_allocator() == this->_M_node_allocator())
        _M_move_assign(std::move(__ht), true_type());
      else
        {
          // Can't move memory, move elements then.
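          // The allocators differ and do not propagate on move assignment,
          // so __ht's nodes cannot be adopted; each element is moved into a
          // node allocated by *this and the source is cleared afterwards.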
          _M_assign_elements(std::move(__ht));
          __ht.clear();
        }
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _Hashtable(const _Hashtable& __ht)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(
        __node_alloc_traits::_S_select_on_copy(__ht._M_node_allocator())),
      _M_buckets(nullptr),
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      __alloc_node_gen_t __alloc_node_gen(*this);
      _M_assign(__ht, __alloc_node_gen);
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _Hashtable(_Hashtable&& __ht) noexcept
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(std::move(__ht._M_base_alloc())),
      _M_buckets(__ht._M_buckets),
      _M_bucket_count(__ht._M_bucket_count),
      _M_before_begin(__ht._M_before_begin._M_nxt),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      // Update, if necessary, buckets if __ht is using its single bucket.
      if (__ht._M_uses_single_bucket())
        {
          _M_buckets = &_M_single_bucket;
          _M_single_bucket = __ht._M_single_bucket;
        }

      // Update, if necessary, bucket pointing to before begin that hasn't
      // moved.
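      // (_M_before_begin is embedded in the object itself, so the bucket
      // that referenced __ht's _M_before_begin must be redirected to ours.)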
      if (_M_begin())
        _M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin;

      __ht._M_reset();
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _Hashtable(const _Hashtable& __ht, const allocator_type& __a)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(__node_alloc_type(__a)),
      _M_buckets(),
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      __alloc_node_gen_t __alloc_node_gen(*this);
      _M_assign(__ht, __alloc_node_gen);
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _Hashtable(_Hashtable&& __ht, const allocator_type& __a)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(__node_alloc_type(__a)),
      _M_buckets(nullptr),
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      if (__ht._M_node_allocator() == this->_M_node_allocator())
        {
          if (__ht._M_uses_single_bucket())
            {
              _M_buckets = &_M_single_bucket;
              _M_single_bucket = __ht._M_single_bucket;
            }
          else
            _M_buckets = __ht._M_buckets;

          _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
          // Update, if necessary, bucket pointing to before begin that hasn't
          // moved.
          if (_M_begin())
            _M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin;
          __ht._M_reset();
        }
      else
        {
          __alloc_node_gen_t __alloc_gen(*this);

          using _Fwd_Ht = typename
            conditional<__move_if_noexcept_cond<value_type>::value,
                        const _Hashtable&, _Hashtable&&>::type;
          _M_assign(std::forward<_Fwd_Ht>(__ht), __alloc_gen);
          __ht.clear();
        }
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    ~_Hashtable() noexcept
    {
      clear();
      _M_deallocate_buckets();
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    swap(_Hashtable& __x)
    noexcept(__and_<__is_nothrow_swappable<_H1>,
                    __is_nothrow_swappable<_Equal>>::value)
    {
      // The only base class with member variables is hash_code_base.
      // We define _Hash_code_base::_M_swap because different
      // specializations have different members.
      this->_M_swap(__x);

      std::__alloc_on_swap(this->_M_node_allocator(), __x._M_node_allocator());
      std::swap(_M_rehash_policy, __x._M_rehash_policy);

      // Deal properly with potentially moved instances.
      if (this->_M_uses_single_bucket())
        {
          if (!__x._M_uses_single_bucket())
            {
              _M_buckets = __x._M_buckets;
              __x._M_buckets = &__x._M_single_bucket;
            }
        }
      else if (__x._M_uses_single_bucket())
        {
          __x._M_buckets = _M_buckets;
          _M_buckets = &_M_single_bucket;
        }
      else
        std::swap(_M_buckets, __x._M_buckets);

      std::swap(_M_bucket_count, __x._M_bucket_count);
      std::swap(_M_before_begin._M_nxt, __x._M_before_begin._M_nxt);
      std::swap(_M_element_count, __x._M_element_count);
      std::swap(_M_single_bucket, __x._M_single_bucket);

      // Fix buckets containing the _M_before_begin pointers that can't be
      // swapped.
      if (_M_begin())
        _M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin;

      if (__x._M_begin())
        __x._M_buckets[__x._M_bucket_index(__x._M_begin())]
          = &__x._M_before_begin;
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    find(const key_type& __k)
    -> iterator
    {
      __hash_code __code = this->_M_hash_code(__k);
      std::size_t __bkt = _M_bucket_index(__k, __code);
      __node_type* __p = _M_find_node(__bkt, __k, __code);
      return __p ? iterator(__p) : end();
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    find(const key_type& __k) const
    -> const_iterator
    {
      __hash_code __code = this->_M_hash_code(__k);
      std::size_t __bkt = _M_bucket_index(__k, __code);
      __node_type* __p = _M_find_node(__bkt, __k, __code);
      return __p ? const_iterator(__p) : end();
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    count(const key_type& __k) const
    -> size_type
    {
      __hash_code __code = this->_M_hash_code(__k);
      std::size_t __bkt = _M_bucket_index(__k, __code);
      __node_type* __p = _M_bucket_begin(__bkt);
      if (!__p)
        return 0;

      std::size_t __result = 0;
      for (;; __p = __p->_M_next())
        {
          if (this->_M_equals(__k, __code, __p))
            ++__result;
          else if (__result)
            // All equivalent values are next to each other, if we
            // found a non-equivalent value after an equivalent one it
            // means that we won't find any new equivalent value.
            break;
          if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __bkt)
            break;
        }
      return __result;
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    equal_range(const key_type& __k)
    -> pair<iterator, iterator>
    {
      __hash_code __code = this->_M_hash_code(__k);
      std::size_t __bkt = _M_bucket_index(__k, __code);
      __node_type* __p = _M_find_node(__bkt, __k, __code);

      if (__p)
        {
          __node_type* __p1 = __p->_M_next();
          while (__p1 && _M_bucket_index(__p1) == __bkt
                 && this->_M_equals(__k, __code, __p1))
            __p1 = __p1->_M_next();

          return std::make_pair(iterator(__p), iterator(__p1));
        }
      else
        return std::make_pair(end(), end());
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    equal_range(const key_type& __k) const
    -> pair<const_iterator, const_iterator>
    {
      __hash_code __code = this->_M_hash_code(__k);
      std::size_t __bkt = _M_bucket_index(__k, __code);
      __node_type* __p = _M_find_node(__bkt, __k, __code);

      if (__p)
        {
          __node_type* __p1 = __p->_M_next();
          while (__p1 && _M_bucket_index(__p1) == __bkt
                 && this->_M_equals(__k, __code, __p1))
            __p1 = __p1->_M_next();

          return std::make_pair(const_iterator(__p), const_iterator(__p1));
        }
      else
        return std::make_pair(end(), end());
    }

  // Find the node before the one whose key compares equal to k in the
  // bucket bkt.  Return nullptr if no such node is found.
  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _M_find_before_node(size_type __bkt, const key_type& __k,
                        __hash_code __code) const
    -> __node_base*
    {
      __node_base* __prev_p = _M_buckets[__bkt];
      if (!__prev_p)
        return nullptr;

      for (__node_type* __p = static_cast<__node_type*>(__prev_p->_M_nxt);;
           __p = __p->_M_next())
        {
          if (this->_M_equals(__k, __code, __p))
            return __prev_p;

          if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __bkt)
            break;
          __prev_p = __p;
        }
      return nullptr;
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _M_insert_bucket_begin(size_type __bkt, __node_type* __node)
    {
      if (_M_buckets[__bkt])
        {
          // Bucket is not empty, we just need to insert the new node
          // after the bucket before begin.
          __node->_M_nxt = _M_buckets[__bkt]->_M_nxt;
          _M_buckets[__bkt]->_M_nxt = __node;
        }
      else
        {
          // The bucket is empty, the new node is inserted at the
          // beginning of the singly-linked list and the bucket will
          // contain _M_before_begin pointer.
          __node->_M_nxt = _M_before_begin._M_nxt;
          _M_before_begin._M_nxt = __node;
          if (__node->_M_nxt)
            // We must update former begin bucket that is pointing to
            // _M_before_begin.
            _M_buckets[_M_bucket_index(__node->_M_next())] = __node;
          _M_buckets[__bkt] = &_M_before_begin;
        }
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _M_remove_bucket_begin(size_type __bkt, __node_type* __next,
                           size_type __next_bkt)
    {
      if (!__next || __next_bkt != __bkt)
        {
          // Bucket is now empty
          // First update next bucket if any
          if (__next)
            _M_buckets[__next_bkt] = _M_buckets[__bkt];

          // Second update before begin node if necessary
          if (&_M_before_begin == _M_buckets[__bkt])
            _M_before_begin._M_nxt = __next;
          _M_buckets[__bkt] = nullptr;
        }
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _M_get_previous_node(size_type __bkt, __node_base* __n)
    -> __node_base*
    {
      __node_base* __prev_n = _M_buckets[__bkt];
      while (__prev_n->_M_nxt != __n)
        __prev_n = __prev_n->_M_nxt;
      return __prev_n;
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    template<typename... _Args>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _H1, _H2, _Hash, _RehashPolicy, _Traits>::
      _M_emplace(true_type, _Args&&... __args)
      -> pair<iterator, bool>
      {
        // First build the node to get access to the hash code
        _Scoped_node __node { this, std::forward<_Args>(__args)... };
        const key_type& __k = this->_M_extract()(__node._M_node->_M_v());
        __hash_code __code = this->_M_hash_code(__k);
        size_type __bkt = _M_bucket_index(__k, __code);
        if (__node_type* __p = _M_find_node(__bkt, __k, __code))
          // There is already an equivalent node, no insertion
          return std::make_pair(iterator(__p), false);

        // Insert the node
        auto __pos = _M_insert_unique_node(__k, __bkt, __code, __node._M_node);
        __node._M_node = nullptr;
        return { __pos, true };
      }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    template<typename... _Args>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _H1, _H2, _Hash, _RehashPolicy, _Traits>::
      _M_emplace(const_iterator __hint, false_type, _Args&&... __args)
      -> iterator
      {
        // First build the node to get its hash code.
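        // The node is owned by _Scoped_node, so if computing the hash code
        // or linking the node throws, the node and its element are freed
        // automatically.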
        _Scoped_node __node { this, std::forward<_Args>(__args)... };
        const key_type& __k = this->_M_extract()(__node._M_node->_M_v());

        __hash_code __code = this->_M_hash_code(__k);
        auto __pos
          = _M_insert_multi_node(__hint._M_cur, __k, __code, __node._M_node);
        __node._M_node = nullptr;
        return __pos;
      }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _M_insert_unique_node(const key_type& __k, size_type __bkt,
                          __hash_code __code, __node_type* __node,
                          size_type __n_elt)
    -> iterator
    {
      const __rehash_state& __saved_state = _M_rehash_policy._M_state();
      std::pair<bool, std::size_t> __do_rehash
        = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count,
                                          __n_elt);

      if (__do_rehash.first)
        {
          _M_rehash(__do_rehash.second, __saved_state);
          __bkt = _M_bucket_index(__k, __code);
        }

      this->_M_store_code(__node, __code);

      // Always insert at the beginning of the bucket.
      _M_insert_bucket_begin(__bkt, __node);
      ++_M_element_count;
      return iterator(__node);
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _M_insert_multi_node(__node_type* __hint, const key_type& __k,
                         __hash_code __code, __node_type* __node)
    -> iterator
    {
      const __rehash_state& __saved_state = _M_rehash_policy._M_state();
      std::pair<bool, std::size_t> __do_rehash
        = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1);

      if (__do_rehash.first)
        _M_rehash(__do_rehash.second, __saved_state);

      this->_M_store_code(__node, __code);
      size_type __bkt = _M_bucket_index(__k, __code);

      // Find the node before an equivalent one, or use the hint if it
      // exists and is equivalent.
      __node_base* __prev
        = __builtin_expect(__hint != nullptr, false)
          && this->_M_equals(__k, __code, __hint)
          ? __hint
          : _M_find_before_node(__bkt, __k, __code);
      if (__prev)
        {
          // Insert after the node before the equivalent one.
          __node->_M_nxt = __prev->_M_nxt;
          __prev->_M_nxt = __node;
          if (__builtin_expect(__prev == __hint, false))
            // The hint might be the bucket's last node; in this case we
            // need to update the next bucket.
            if (__node->_M_nxt
                && !this->_M_equals(__k, __code, __node->_M_next()))
              {
                size_type __next_bkt = _M_bucket_index(__node->_M_next());
                if (__next_bkt != __bkt)
                  _M_buckets[__next_bkt] = __node;
              }
        }
      else
        // The inserted node has no equivalent in the hashtable.  We must
        // insert the new node at the beginning of the bucket to preserve
        // equivalent elements' relative positions.
        _M_insert_bucket_begin(__bkt, __node);
      ++_M_element_count;
      return iterator(__node);
    }

  // Insert v if no element with its key is already present.
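  // Illustrative caller-side view (a sketch, assuming the std::unordered_map
  // front end; not part of this header).  A map insert roughly dispatches to
  // the unique-keys overload below:
  //
  //   std::unordered_map<int, int> m;
  //   auto r1 = m.insert({1, 10});  // no equivalent key: r1.second == true
  //   auto r2 = m.insert({1, 20});  // key 1 already present: r2.second ==
  //                                 //   false, mapped value stays 10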
  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    template<typename _Arg, typename _NodeGenerator>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _H1, _H2, _Hash, _RehashPolicy, _Traits>::
      _M_insert(_Arg&& __v, const _NodeGenerator& __node_gen, true_type,
                size_type __n_elt)
      -> pair<iterator, bool>
      {
        const key_type& __k = this->_M_extract()(__v);
        __hash_code __code = this->_M_hash_code(__k);
        size_type __bkt = _M_bucket_index(__k, __code);

        if (__node_type* __node = _M_find_node(__bkt, __k, __code))
          return { iterator(__node), false };

        _Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
        auto __pos
          = _M_insert_unique_node(__k, __bkt, __code, __node._M_node, __n_elt);
        __node._M_node = nullptr;
        return { __pos, true };
      }

  // Insert v unconditionally.
  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    template<typename _Arg, typename _NodeGenerator>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _H1, _H2, _Hash, _RehashPolicy, _Traits>::
      _M_insert(const_iterator __hint, _Arg&& __v,
                const _NodeGenerator& __node_gen, false_type)
      -> iterator
      {
        // First compute the hash code so that we don't do anything if it
        // throws.
        __hash_code __code = this->_M_hash_code(this->_M_extract()(__v));

        // Second, allocate the new node so that we don't rehash if it throws.
        _Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
        const key_type& __k = this->_M_extract()(__node._M_node->_M_v());
        auto __pos
          = _M_insert_multi_node(__hint._M_cur, __k, __code, __node._M_node);
        __node._M_node = nullptr;
        return __pos;
      }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    erase(const_iterator __it)
    -> iterator
    {
      __node_type* __n = __it._M_cur;
      std::size_t __bkt = _M_bucket_index(__n);

      // Look for the node preceding the erased one so that we can unlink
      // it; this is why buckets store their before-begin node, which makes
      // this search fast.
      __node_base* __prev_n = _M_get_previous_node(__bkt, __n);
      return _M_erase(__bkt, __prev_n, __n);
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _M_erase(size_type __bkt, __node_base* __prev_n, __node_type* __n)
    -> iterator
    {
      if (__prev_n == _M_buckets[__bkt])
        _M_remove_bucket_begin(__bkt, __n->_M_next(),
                               __n->_M_nxt
                               ? _M_bucket_index(__n->_M_next()) : 0);
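      // Otherwise __n is not the bucket's first node.  If it is the
      // bucket's last node, the following bucket still refers to __n as
      // its before-begin node, so that bucket entry is redirected to
      // __prev_n below.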
      else if (__n->_M_nxt)
        {
          size_type __next_bkt = _M_bucket_index(__n->_M_next());
          if (__next_bkt != __bkt)
            _M_buckets[__next_bkt] = __prev_n;
        }

      __prev_n->_M_nxt = __n->_M_nxt;
      iterator __result(__n->_M_next());
      this->_M_deallocate_node(__n);
      --_M_element_count;

      return __result;
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _M_erase(true_type, const key_type& __k)
    -> size_type
    {
      __hash_code __code = this->_M_hash_code(__k);
      std::size_t __bkt = _M_bucket_index(__k, __code);

      // Look for the node before the first matching node.
      __node_base* __prev_n = _M_find_before_node(__bkt, __k, __code);
      if (!__prev_n)
        return 0;

      // We found a matching node, erase it.
      __node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);
      _M_erase(__bkt, __prev_n, __n);
      return 1;
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _M_erase(false_type, const key_type& __k)
    -> size_type
    {
      __hash_code __code = this->_M_hash_code(__k);
      std::size_t __bkt = _M_bucket_index(__k, __code);

      // Look for the node before the first matching node.
      __node_base* __prev_n = _M_find_before_node(__bkt, __k, __code);
      if (!__prev_n)
        return 0;

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 526. Is it undefined if a function in the standard changes
      // in parameters?
      // We use one loop to find all matching nodes and another to
      // deallocate them so that the key stays valid during the first loop.
      // It might be invalidated indirectly when destroying nodes.
      __node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);
      __node_type* __n_last = __n;
      std::size_t __n_last_bkt = __bkt;
      do
        {
          __n_last = __n_last->_M_next();
          if (!__n_last)
            break;
          __n_last_bkt = _M_bucket_index(__n_last);
        }
      while (__n_last_bkt == __bkt && this->_M_equals(__k, __code, __n_last));

      // Deallocate nodes.
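      // At this point __n_last is the first node past the run of matching
      // nodes (or nullptr), so the loop below deallocates exactly the
      // nodes in [__n, __n_last) before relinking the list.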
      size_type __result = 0;
      do
        {
          __node_type* __p = __n->_M_next();
          this->_M_deallocate_node(__n);
          __n = __p;
          ++__result;
          --_M_element_count;
        }
      while (__n != __n_last);

      if (__prev_n == _M_buckets[__bkt])
        _M_remove_bucket_begin(__bkt, __n_last, __n_last_bkt);
      else if (__n_last && __n_last_bkt != __bkt)
        _M_buckets[__n_last_bkt] = __prev_n;
      __prev_n->_M_nxt = __n_last;
      return __result;
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    erase(const_iterator __first, const_iterator __last)
    -> iterator
    {
      __node_type* __n = __first._M_cur;
      __node_type* __last_n = __last._M_cur;
      if (__n == __last_n)
        return iterator(__n);

      std::size_t __bkt = _M_bucket_index(__n);

      __node_base* __prev_n = _M_get_previous_node(__bkt, __n);
      bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
      std::size_t __n_bkt = __bkt;
      for (;;)
        {
          do
            {
              __node_type* __tmp = __n;
              __n = __n->_M_next();
              this->_M_deallocate_node(__tmp);
              --_M_element_count;
              if (!__n)
                break;
              __n_bkt = _M_bucket_index(__n);
            }
          while (__n != __last_n && __n_bkt == __bkt);
          if (__is_bucket_begin)
            _M_remove_bucket_begin(__bkt, __n, __n_bkt);
          if (__n == __last_n)
            break;
          __is_bucket_begin = true;
          __bkt = __n_bkt;
        }

      if (__n && (__n_bkt != __bkt || __is_bucket_begin))
        _M_buckets[__n_bkt] = __prev_n;
      __prev_n->_M_nxt = __n;
      return iterator(__n);
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    clear() noexcept
    {
      this->_M_deallocate_nodes(_M_begin());
      __builtin_memset(_M_buckets, 0, _M_bucket_count * sizeof(__bucket_type));
      _M_element_count = 0;
      _M_before_begin._M_nxt = nullptr;
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    rehash(size_type __bkt_count)
    {
      const __rehash_state& __saved_state = _M_rehash_policy._M_state();
      __bkt_count
        = std::max(_M_rehash_policy._M_bkt_for_elements(_M_element_count + 1),
                   __bkt_count);
      __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count);

      if (__bkt_count != _M_bucket_count)
        _M_rehash(__bkt_count, __saved_state);
      else
        // No rehash, restore previous state to keep it consistent with
        // container state.
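        // (The _M_bkt_for_elements/_M_next_bkt calls above may have
        // advanced the policy's internal bookkeeping, e.g. its next-resize
        // threshold, so it is rolled back when no reallocation happens.)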
        _M_rehash_policy._M_reset(__saved_state);
    }

  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _M_rehash(size_type __bkt_count, const __rehash_state& __state)
    {
      __try
        {
          _M_rehash_aux(__bkt_count, __unique_keys());
        }
      __catch(...)
        {
          // A failure here means that bucket allocation failed.  We only
          // have to restore the hash policy's previous state.
          _M_rehash_policy._M_reset(__state);
          __throw_exception_again;
        }
    }

  // Rehash when there are no equivalent elements.
  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _M_rehash_aux(size_type __bkt_count, true_type)
    {
      __bucket_type* __new_buckets = _M_allocate_buckets(__bkt_count);
      __node_type* __p = _M_begin();
      _M_before_begin._M_nxt = nullptr;
      std::size_t __bbegin_bkt = 0;
      while (__p)
        {
          __node_type* __next = __p->_M_next();
          std::size_t __bkt
            = __hash_code_base::_M_bucket_index(__p, __bkt_count);
          if (!__new_buckets[__bkt])
            {
              __p->_M_nxt = _M_before_begin._M_nxt;
              _M_before_begin._M_nxt = __p;
              __new_buckets[__bkt] = &_M_before_begin;
              if (__p->_M_nxt)
                __new_buckets[__bbegin_bkt] = __p;
              __bbegin_bkt = __bkt;
            }
          else
            {
              __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
              __new_buckets[__bkt]->_M_nxt = __p;
            }
          __p = __next;
        }

      _M_deallocate_buckets();
      _M_bucket_count = __bkt_count;
      _M_buckets = __new_buckets;
    }

  // Rehash when there can be equivalent elements, preserving their relative
  // order.
  template<typename _Key, typename _Value,
           typename _Alloc, typename _ExtractKey, typename _Equal,
           typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
           typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, _Traits>::
    _M_rehash_aux(size_type __bkt_count, false_type)
    {
      __bucket_type* __new_buckets = _M_allocate_buckets(__bkt_count);

      __node_type* __p = _M_begin();
      _M_before_begin._M_nxt = nullptr;
      std::size_t __bbegin_bkt = 0;
      std::size_t __prev_bkt = 0;
      __node_type* __prev_p = nullptr;
      bool __check_bucket = false;

      while (__p)
        {
          __node_type* __next = __p->_M_next();
          std::size_t __bkt
            = __hash_code_base::_M_bucket_index(__p, __bkt_count);

          if (__prev_p && __prev_bkt == __bkt)
            {
              // The previous insert was already in this bucket: we insert
              // after the previously inserted one to preserve equivalent
              // elements' relative order.
              __p->_M_nxt = __prev_p->_M_nxt;
              __prev_p->_M_nxt = __p;

              // Inserting after a node in a bucket requires checking that
              // we haven't changed the bucket's last node; if we have, the
              // next bucket, which holds that node as its before-begin
              // node, must be updated.
              // We schedule a check as soon as we move out of the sequence
              // of equivalent nodes to limit the number of checks.
              __check_bucket = true;
            }
          else
            {
              if (__check_bucket)
                {
                  // Check if we shall update the next bucket because of
                  // insertions into the __prev_bkt bucket.
                  if (__prev_p->_M_nxt)
                    {
                      std::size_t __next_bkt
                        = __hash_code_base::_M_bucket_index(__prev_p->_M_next(),
                                                            __bkt_count);
                      if (__next_bkt != __prev_bkt)
                        __new_buckets[__next_bkt] = __prev_p;
                    }
                  __check_bucket = false;
                }

              if (!__new_buckets[__bkt])
                {
                  __p->_M_nxt = _M_before_begin._M_nxt;
                  _M_before_begin._M_nxt = __p;
                  __new_buckets[__bkt] = &_M_before_begin;
                  if (__p->_M_nxt)
                    __new_buckets[__bbegin_bkt] = __p;
                  __bbegin_bkt = __bkt;
                }
              else
                {
                  __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
                  __new_buckets[__bkt]->_M_nxt = __p;
                }
            }
          __prev_p = __p;
          __prev_bkt = __bkt;
          __p = __next;
        }

      if (__check_bucket && __prev_p->_M_nxt)
        {
          std::size_t __next_bkt
            = __hash_code_base::_M_bucket_index(__prev_p->_M_next(),
                                                __bkt_count);
          if (__next_bkt != __prev_bkt)
            __new_buckets[__next_bkt] = __prev_p;
        }

      _M_deallocate_buckets();
      _M_bucket_count = __bkt_count;
      _M_buckets = __new_buckets;
    }

#if __cplusplus > 201402L
  template<typename, typename, typename> class _Hash_merge_helper { };
#endif // C++17

#if __cpp_deduction_guides >= 201606
  // Used to constrain deduction guides
  template<typename _Hash>
    using _RequireNotAllocatorOrIntegral
      = __enable_if_t<!__or_<is_integral<_Hash>, __is_allocator<_Hash>>::value>;
#endif

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // _HASHTABLE_H