//===- sanitizer_dense_map.h - Dense probed hash table ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is a fork of the llvm/ADT/DenseMap.h class with the following changes:
//  * Use mmap to allocate.
//  * No iterators.
//  * Does not shrink.
//
//===----------------------------------------------------------------------===//
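
// A minimal usage sketch (illustrative only, not a normative part of this
// header; it assumes the DenseMapInfo<uptr> specialization provided by
// sanitizer_dense_map_info.h):
//
//   __sanitizer::DenseMap<uptr, uptr> M;
//   M[0x1234] = 42;                      // insert or update via operator[]
//   if (const auto *P = M.find(0x1234))  // find() returns a bucket pointer
//     CHECK_EQ(P->second, 42);
//   M.erase(0x1234);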

#ifndef SANITIZER_DENSE_MAP_H
#define SANITIZER_DENSE_MAP_H

#include "sanitizer_common.h"
#include "sanitizer_dense_map_info.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_type_traits.h"

namespace __sanitizer {

template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
class DenseMapBase {
 public:
  using size_type = unsigned;
  using key_type = KeyT;
  using mapped_type = ValueT;
  using value_type = BucketT;

  WARN_UNUSED_RESULT bool empty() const { return getNumEntries() == 0; }
  unsigned size() const { return getNumEntries(); }

  /// Grow the densemap so that it can contain at least \p NumEntries items
  /// before resizing again.
  void reserve(size_type NumEntries) {
    auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
    if (NumBuckets > getNumBuckets())
      grow(NumBuckets);
  }

  void clear() {
    if (getNumEntries() == 0 && getNumTombstones() == 0)
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    if (__sanitizer::is_trivially_destructible<ValueT>::value) {
      // Use a simpler loop when values don't need destruction.
      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
        P->getFirst() = EmptyKey;
    } else {
      unsigned NumEntries = getNumEntries();
      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
          if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
            P->getSecond().~ValueT();
            --NumEntries;
          }
          P->getFirst() = EmptyKey;
        }
      }
      CHECK_EQ(NumEntries, 0);
    }
    setNumEntries(0);
    setNumTombstones(0);
  }

  /// Return true if the specified key is in the map, false otherwise.
  bool contains(const KeyT &Key) const { return doFind(Key) != nullptr; }

  /// Return 1 if the specified key is in the map, 0 otherwise.
  size_type count(const KeyT &Key) const { return contains(Key) ? 1 : 0; }

  value_type *find(const KeyT &Key) { return doFind(Key); }
  const value_type *find(const KeyT &Key) const { return doFind(Key); }

  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <class LookupKeyT>
  value_type *find_as(const LookupKeyT &Key) {
    return doFind(Key);
  }
  template <class LookupKeyT>
  const value_type *find_as(const LookupKeyT &Key) const {
    return doFind(Key);
  }
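
  // A hypothetical sketch (names invented for illustration) of the extra
  // DenseMapInfo overloads a key type would need so that find_as() can take a
  // cheaper lookup key; see sanitizer_dense_map_info.h for the base contract:
  //
  //   struct ObjKey { char *Name; /* owning copy */ };
  //   template <>
  //   struct DenseMapInfo<ObjKey> {
  //     static ObjKey getEmptyKey();
  //     static ObjKey getTombstoneKey();
  //     static unsigned getHashValue(const ObjKey &K);
  //     static bool isEqual(const ObjKey &LHS, const ObjKey &RHS);
  //     // Extra overloads used by find_as(const char *):
  //     static unsigned getHashValue(const char *Name);
  //     static bool isEqual(const char *Name, const ObjKey &K);
  //   };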

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueT lookup(const KeyT &Key) const {
    if (const BucketT *Bucket = doFind(Key))
      return Bucket->getSecond();
    return ValueT();
  }

  // Inserts a key/value pair into the map if the key isn't already present.
  // If the key is already in the map, returns false and does not update the
  // value.
  detail::DenseMapPair<value_type *, bool> insert(const value_type &KV) {
    return try_emplace(KV.first, KV.second);
  }

  // Inserts a key/value pair into the map if the key isn't already present.
  // If the key is already in the map, returns false and does not update the
  // value.
  detail::DenseMapPair<value_type *, bool> insert(value_type &&KV) {
    return try_emplace(__sanitizer::move(KV.first),
                       __sanitizer::move(KV.second));
  }

  // Inserts a key/value pair into the map if the key isn't already present.
  // The value is constructed in-place if the key is not in the map; otherwise
  // the provided arguments are left unmoved.
  template <typename... Ts>
  detail::DenseMapPair<value_type *, bool> try_emplace(KeyT &&Key,
                                                       Ts &&...Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return {TheBucket, false};  // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(TheBucket, __sanitizer::move(Key),
                                 __sanitizer::forward<Ts>(Args)...);
    return {TheBucket, true};
  }

  // Inserts a key/value pair into the map if the key isn't already present.
  // The value is constructed in-place if the key is not in the map; otherwise
  // the provided arguments are left unmoved.
  template <typename... Ts>
  detail::DenseMapPair<value_type *, bool> try_emplace(const KeyT &Key,
                                                       Ts &&...Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return {TheBucket, false};  // Already in map.

    // Otherwise, insert the new element.
    TheBucket =
        InsertIntoBucket(TheBucket, Key, __sanitizer::forward<Ts>(Args)...);
    return {TheBucket, true};
  }
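
  // An illustrative sketch of the insertion semantics above (assumed usage,
  // with DenseMapInfo<uptr> from sanitizer_dense_map_info.h):
  //
  //   DenseMap<uptr, uptr> M;
  //   auto Res = M.try_emplace(1, 10);  // {bucket, true}: newly inserted
  //   Res = M.try_emplace(1, 20);       // {bucket, false}: key already there
  //   CHECK_EQ(Res.first->second, 10);  // existing value was not updated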

  /// Alternate version of insert() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <typename LookupKeyT>
  detail::DenseMapPair<value_type *, bool> insert_as(value_type &&KV,
                                                     const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return {TheBucket, false};  // Already in map.

    // Otherwise, insert the new element.
    TheBucket =
        InsertIntoBucketWithLookup(TheBucket, __sanitizer::move(KV.first),
                                   __sanitizer::move(KV.second), Val);
    return {TheBucket, true};
  }

  bool erase(const KeyT &Val) {
    BucketT *TheBucket = doFind(Val);
    if (!TheBucket)
      return false;  // not in map.

    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }

  void erase(value_type *I) {
    CHECK_NE(I, nullptr);
    BucketT *TheBucket = &*I;
    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
  }

  value_type &FindAndConstruct(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, Key);
  }

  ValueT &operator[](const KeyT &Key) { return FindAndConstruct(Key).second; }

  value_type &FindAndConstruct(KeyT &&Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, __sanitizer::move(Key));
  }

  ValueT &operator[](KeyT &&Key) {
    return FindAndConstruct(__sanitizer::move(Key)).second;
  }

  /// Iterate over the active entries of the container.
  ///
  /// The callback can return false to stop the iteration early.
  template <class Fn>
  void forEach(Fn fn) {
    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (auto *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      const KeyT K = P->getFirst();
      if (!KeyInfoT::isEqual(K, EmptyKey) &&
          !KeyInfoT::isEqual(K, TombstoneKey)) {
        if (!fn(*P))
          return;
      }
    }
  }

  template <class Fn>
  void forEach(Fn fn) const {
    const_cast<DenseMapBase *>(this)->forEach(
        [&](const value_type &KV) { return fn(KV); });
  }
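
  // Sketch of iterating with forEach() (a hedged example; M is assumed to be
  // a DenseMap<uptr, uptr>). Returning false from the callback stops early:
  //
  //   uptr Sum = 0;
  //   M.forEach([&](const DenseMap<uptr, uptr>::value_type &KV) {
  //     Sum += KV.second;
  //     return true;  // keep going; return false to stop the iteration
  //   });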

 protected:
  DenseMapBase() = default;

  void destroyAll() {
    if (getNumBuckets() == 0)  // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
        P->getSecond().~ValueT();
      P->getFirst().~KeyT();
    }
  }

  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    CHECK_EQ((getNumBuckets() & (getNumBuckets() - 1)), 0);
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      ::new (&B->getFirst()) KeyT(EmptyKey);
  }

  /// Returns the number of buckets to allocate to ensure that the DenseMap can
  /// accommodate \p NumEntries without needing to grow().
  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
    // Ensure that "NumEntries * 4 < NumBuckets * 3"
    if (NumEntries == 0)
      return 0;
    // +1 is required because of the strict inequality.
    // For example, if NumEntries is 48, then 48 * 4 / 3 == 64 buckets would
    // violate it, so we need at least 65 (rounded up to a power of two).
    return RoundUpToPowerOfTwo((NumEntries * 4 / 3 + 1) + /* NextPowerOf2 */ 1);
  }

  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
        (void)FoundVal;  // silence warning.
        CHECK(!FoundVal);
        DestBucket->getFirst() = __sanitizer::move(B->getFirst());
        ::new (&DestBucket->getSecond())
            ValueT(__sanitizer::move(B->getSecond()));
        incrementNumEntries();

        // Free the value.
        B->getSecond().~ValueT();
      }
      B->getFirst().~KeyT();
    }
  }

  template <typename OtherBaseT>
  void copyFrom(
      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
    CHECK_NE(&other, this);
    CHECK_EQ(getNumBuckets(), other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    if (__sanitizer::is_trivially_copyable<KeyT>::value &&
        __sanitizer::is_trivially_copyable<ValueT>::value)
      internal_memcpy(reinterpret_cast<void *>(getBuckets()),
                      other.getBuckets(), getNumBuckets() * sizeof(BucketT));
    else
      for (uptr i = 0; i < getNumBuckets(); ++i) {
        ::new (&getBuckets()[i].getFirst())
            KeyT(other.getBuckets()[i].getFirst());
        if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
            !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
          ::new (&getBuckets()[i].getSecond())
              ValueT(other.getBuckets()[i].getSecond());
      }
  }

  static unsigned getHashValue(const KeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  template <typename LookupKeyT>
  static unsigned getHashValue(const LookupKeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  static const KeyT getEmptyKey() { return KeyInfoT::getEmptyKey(); }

  static const KeyT getTombstoneKey() { return KeyInfoT::getTombstoneKey(); }

 private:
  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }

  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }

  void incrementNumEntries() { setNumEntries(getNumEntries() + 1); }

  void decrementNumEntries() { setNumEntries(getNumEntries() - 1); }

  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }

  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }

  void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); }

  void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); }

  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }

  BucketT *getBuckets() { return static_cast<DerivedT *>(this)->getBuckets(); }

  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }

  BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); }

  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  void grow(unsigned AtLeast) { static_cast<DerivedT *>(this)->grow(AtLeast); }

  template <typename KeyArg, typename... ValueArgs>
  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
                            ValueArgs &&...Values) {
    TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);

    TheBucket->getFirst() = __sanitizer::forward<KeyArg>(Key);
    ::new (&TheBucket->getSecond())
        ValueT(__sanitizer::forward<ValueArgs>(Values)...);
    return TheBucket;
  }

  template <typename LookupKeyT>
  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
                                      ValueT &&Value, LookupKeyT &Lookup) {
    TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);

    TheBucket->getFirst() = __sanitizer::move(Key);
    ::new (&TheBucket->getSecond()) ValueT(__sanitizer::move(Value));
    return TheBucket;
  }

  template <typename LookupKeyT>
  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
                                BucketT *TheBucket) {
    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The latter case is tricky.  For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket.  If the
    // table were completely filled with tombstones, no lookup would ever
    // succeed, causing infinite loops in lookup.
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
      this->grow(NumBuckets * 2);
      LookupBucketFor(Lookup, TheBucket);
      NumBuckets = getNumBuckets();
    } else if (UNLIKELY(NumBuckets - (NewNumEntries + getNumTombstones()) <=
                        NumBuckets / 8)) {
      this->grow(NumBuckets);
      LookupBucketFor(Lookup, TheBucket);
    }
    CHECK(TheBucket);

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have a self-consistent entry count.
    incrementNumEntries();

    // If we are writing over a tombstone, remember this.
    const KeyT EmptyKey = getEmptyKey();
    if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
      decrementNumTombstones();

    return TheBucket;
  }
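
  // A worked example of the growth policy above (illustrative numbers): with
  // NumBuckets == 64, the first branch doubles the table when the 48th entry
  // is inserted (48 * 4 >= 64 * 3); the second branch re-grows at the same
  // size once live entries plus tombstones leave at most 64 / 8 == 8 empty
  // buckets, which rehashes everything and clears the accumulated tombstones.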

  template <typename LookupKeyT>
  BucketT *doFind(const LookupKeyT &Val) {
    BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();
    if (NumBuckets == 0)
      return nullptr;

    const KeyT EmptyKey = getEmptyKey();
    unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
    unsigned ProbeAmt = 1;
    while (true) {
      BucketT *Bucket = BucketsPtr + BucketNo;
      if (LIKELY(KeyInfoT::isEqual(Val, Bucket->getFirst())))
        return Bucket;
      if (LIKELY(KeyInfoT::isEqual(Bucket->getFirst(), EmptyKey)))
        return nullptr;

      // Otherwise, it's a hash collision or a tombstone; continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= NumBuckets - 1;
    }
  }

  template <typename LookupKeyT>
  const BucketT *doFind(const LookupKeyT &Val) const {
    return const_cast<DenseMapBase *>(this)->doFind(Val);
  }

  /// LookupBucketFor - Look up the appropriate bucket for Val, returning it in
  /// FoundBucket.  If the bucket already contains the key and a value, this
  /// returns true; otherwise it returns false and sets FoundBucket to the
  /// bucket holding the empty marker or tombstone where the key would go.
  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val,
                       const BucketT *&FoundBucket) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = nullptr;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    const BucketT *FoundTombstone = nullptr;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    CHECK(!KeyInfoT::isEqual(Val, EmptyKey));
    CHECK(!KeyInfoT::isEqual(Val, TombstoneKey));

    unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
    unsigned ProbeAmt = 1;
    while (true) {
      const BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket?  If so, return it.
      if (LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the map; report
      // the bucket where it should be inserted.
      if (LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it.  If Val ends up not in the map,
      // we prefer to return it rather than a bucket farther along the probe
      // sequence.
      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
          !FoundTombstone)
        FoundTombstone = ThisBucket;  // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone; continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets - 1);
    }
  }
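
  // Note on the probe sequence above (a reasoning sketch, not normative): the
  // cumulative offsets 1, 1+2, 1+2+3, ... are triangular numbers, and since
  // NumBuckets is always a power of two, this sequence visits every bucket
  // before repeating, so the loop terminates whenever some bucket is empty or
  // holds the key.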

  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    const BucketT *ConstFoundBucket;
    bool Result = const_cast<const DenseMapBase *>(this)->LookupBucketFor(
        Val, ConstFoundBucket);
    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
    return Result;
  }

 public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the sizes of the referenced objects
  /// are not included.
  uptr getMemorySize() const {
    return RoundUpTo(getNumBuckets() * sizeof(BucketT), GetPageSizeCached());
  }
};

/// Equality comparison for DenseMap.
///
/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
/// is also in RHS, and that no additional pairs are in RHS.
/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
/// complexity is linear, worst case is O(N^2) (if every hash collides).
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
bool operator==(
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  if (LHS.size() != RHS.size())
    return false;

  bool R = true;
  LHS.forEach(
      [&](const typename DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT,
                                      BucketT>::value_type &KV) -> bool {
        const auto *I = RHS.find(KV.first);
        if (!I || I->second != KV.second) {
          R = false;
          return false;
        }
        return true;
      });

  return R;
}

/// Inequality comparison for DenseMap.
///
/// Equivalent to !(LHS == RHS). See operator== for performance notes.
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
bool operator!=(
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  return !(LHS == RHS);
}

template <typename KeyT, typename ValueT,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
                                     KeyT, ValueT, KeyInfoT, BucketT> {
  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  BucketT *Buckets = nullptr;
  unsigned NumEntries = 0;
  unsigned NumTombstones = 0;
  unsigned NumBuckets = 0;

 public:
  /// Create a DenseMap with an optional \p InitialReserve that guarantees that
  /// this number of elements can be inserted into the map without grow().
  explicit DenseMap(unsigned InitialReserve) { init(InitialReserve); }
  constexpr DenseMap() = default;
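
  // Illustrative construction sketch (assumed usage): reserving up front
  // avoids any grow() during the first InitialReserve insertions.
  //
  //   DenseMap<uptr, uptr> M(/*InitialReserve=*/100);
  //   for (uptr I = 0; I < 100; I++)
  //     M[I] = I * I;  // no rehashing happens in this loop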

  DenseMap(const DenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  DenseMap(DenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  ~DenseMap() {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
  }

  void swap(DenseMap &RHS) {
    Swap(Buckets, RHS.Buckets);
    Swap(NumEntries, RHS.NumEntries);
    Swap(NumTombstones, RHS.NumTombstones);
    Swap(NumBuckets, RHS.NumBuckets);
  }

  DenseMap &operator=(const DenseMap &other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  DenseMap &operator=(DenseMap &&other) {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
    init(0);
    swap(other);
    return *this;
  }

  void copyFrom(const DenseMap &other) {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
    if (allocateBuckets(other.NumBuckets)) {
      this->BaseT::copyFrom(other);
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void init(unsigned InitNumEntries) {
    auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    allocateBuckets(RoundUpToPowerOfTwo(Max<unsigned>(64, AtLeast)));
    CHECK(Buckets);
    if (!OldBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(OldBuckets, OldBuckets + OldNumBuckets);

    // Free the old table.
    deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets);
  }

 private:
  unsigned getNumEntries() const { return NumEntries; }

  void setNumEntries(unsigned Num) { NumEntries = Num; }

  unsigned getNumTombstones() const { return NumTombstones; }

  void setNumTombstones(unsigned Num) { NumTombstones = Num; }

  BucketT *getBuckets() const { return Buckets; }

  unsigned getNumBuckets() const { return NumBuckets; }

  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = nullptr;
      return false;
    }

    uptr Size = sizeof(BucketT) * NumBuckets;
    if (Size * 2 <= GetPageSizeCached()) {
      // We always allocate at least a page, so use the entire space.
      unsigned Log2 = MostSignificantSetBitIndex(GetPageSizeCached() / Size);
      Size <<= Log2;
      NumBuckets <<= Log2;
      CHECK_EQ(Size, sizeof(BucketT) * NumBuckets);
      CHECK_GT(Size * 2, GetPageSizeCached());
    }
    Buckets = static_cast<BucketT *>(allocate_buffer(Size));
    return true;
  }
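
  // Worked example of the rounding above (illustrative; assumes a 4096-byte
  // page and 16-byte buckets): a request for 64 buckets is 1024 bytes, a page
  // holds 4096 / 1024 == 4 such blocks, so Log2 == 2 and the map actually
  // allocates 256 buckets (4096 bytes), using the whole mmap'ed region.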

  static void *allocate_buffer(uptr Size) {
    return MmapOrDie(RoundUpTo(Size, GetPageSizeCached()), "DenseMap");
  }

  static void deallocate_buffer(void *Ptr, uptr Size) {
    UnmapOrDie(Ptr, RoundUpTo(Size, GetPageSizeCached()));
  }
};

}  // namespace __sanitizer

#endif  // SANITIZER_DENSE_MAP_H