//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Cache used by SizeClassAllocator64.
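//
// Usage sketch (illustrative only; "Allocator64" and "global_stats" are
// hypothetical names, real callers live in the individual tools):
//   SizeClassAllocator64LocalCache<Allocator64> cache;
//   cache.Init(&global_stats);                       // register stats
//   void *p = cache.Allocate(&allocator, class_id);  // may refill from the
//                                                    // global allocator
//   cache.Deallocate(&allocator, class_id, p);       // may drain back
//   cache.Destroy(&allocator, &global_stats);        // drains + unregisters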
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef MemoryMapper<Allocator> MemoryMapperT;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    CompactPtrT chunk = c->chunks[--c->count];
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
  }
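
  // A sketch of the compact-pointer decoding assumed above (illustrative;
  // the real logic lives in the 64-bit primary allocator):
  //   p = region_beg + ((uptr)chunk << scale)   // scale: allocator-defined
  // Storing 32-bit offsets instead of full pointers keeps chunks[] compact.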

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to a check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
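    // Cache full: return half of max_count chunks to the global allocator.
    // Keeping the other half cached presumably avoids refill/drain
    // ping-pong when allocations and frees alternate.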
    if (UNLIKELY(c->count == c->max_count))
      DrainHalfMax(c, allocator, class_id);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }

  void Drain(SizeClassAllocator *allocator) {
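    // One MemoryMapperT is shared by all classes below; presumably this lets
    // the mapper batch the underlying unmap work across size classes.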
    MemoryMapperT memory_mapper(*allocator);
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0) Drain(&memory_mapper, c, allocator, i, c->count);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  typedef typename Allocator::CompactPtrT CompactPtrT;

  struct PerClass {
    u32 count;
    u32 max_count;
    uptr class_size;
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache(PerClass *c) {
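    // max_count == 0 doubles as the "not yet initialized" marker: the first
    // call sets up every class at once, later calls return immediately.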
    if (LIKELY(c->max_count))
      return;
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      c->max_count = 2 * SizeClassMap::MaxCachedHint(size);
      c->class_size = size;
    }
    DCHECK_NE(c->max_count, 0UL);
  }

  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
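    // Ask for half of max_count so that a burst of frees right after the
    // refill still fits in the cache instead of forcing an immediate drain.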
    const uptr num_requested_chunks = c->max_count / 2;
    if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                              num_requested_chunks)))
      return false;
    c->count = num_requested_chunks;
    return true;
  }

  NOINLINE void DrainHalfMax(PerClass *c, SizeClassAllocator *allocator,
                             uptr class_id) {
    MemoryMapperT memory_mapper(*allocator);
    Drain(&memory_mapper, c, allocator, class_id, c->max_count / 2);
  }

  void Drain(MemoryMapperT *memory_mapper, PerClass *c,
             SizeClassAllocator *allocator, uptr class_id, uptr count) {
    CHECK_GE(c->count, count);
    const uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(memory_mapper, &stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};

// Cache used by SizeClassAllocator32.
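// It mirrors the 64-bit cache above, but chunks travel to and from the
// allocator packed in TransferBatch objects rather than as compact-pointer
// arrays. Usage sketch (illustrative; "Allocator32" and "global_stats" are
// hypothetical names):
//   SizeClassAllocator32LocalCache<Allocator32> cache;
//   cache.Init(&global_stats);
//   void *p = cache.Allocate(&allocator, class_id);
//   cache.Deallocate(&allocator, class_id, p);
//   cache.Destroy(&allocator, &global_stats);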
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  // Returns a TransferBatch suitable for class_id.
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      return (TransferBatch*)Allocate(allocator, batch_class_id);
    return b;
  }

  // Destroys TransferBatch b.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      Deallocate(allocator, batch_class_id, b);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    void *res = c->batch[--c->count];
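    // If the cache just became empty, c->count - 1 wraps around; in practice
    // the indexed read lands on the field just before batch[] and the
    // prefetch of a meaningless address is a harmless hint.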
    PREFETCH(c->batch[c->count - 1]);
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to a check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id);
    c->batch[c->count++] = p;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
  // allocated from the kBatchClassID size class (except for those that are
  // needed for kBatchClassID itself). The goal is to have TransferBatches in
  // a totally different region of RAM to improve security.
  static const bool kUseSeparateSizeClassForBatch =
      Allocator::kUseSeparateSizeClassForBatch;

  struct PerClass {
    uptr count;
    uptr max_count;
    uptr class_size;
    uptr batch_class_id;
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      const uptr max_cached = TransferBatch::MaxCached(size);
      c->max_count = 2 * max_cached;
      c->class_size = size;
      // Precompute the class id to use to store batches for the current class
      // id. 0 means the class size is large enough to store a batch within one
      // of the chunks. If using a separate size class, it will always be
      // kBatchClassID, except for kBatchClassID itself.
      if (kUseSeparateSizeClassForBatch) {
        c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
      } else {
        c->batch_class_id = (size <
          TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
              batch_class_id : 0;
      }
    }
    DCHECK_NE(c->max_count, 0UL);
  }
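
  // Worked example of the batch_class_id choice above (sizes are illustrative
  // assumptions, not values from a real SizeClassMap): a class of 64-byte
  // chunks whose batch of max_cached pointers needs more than 64 bytes cannot
  // host its batches inline, so they are allocated from
  // ClassID(sizeof(TransferBatch)); a large class whose chunks can hold a
  // full batch keeps batch_class_id == 0 and stores batches in its own
  // chunks.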

  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    if (UNLIKELY(!b))
      return false;
    CHECK_GT(b->Count(), 0);
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
    return true;
  }

  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,
                      uptr class_id) {
    const uptr count = Min(c->max_count / 2, c->count);
    const uptr first_idx_to_drain = c->count - count;
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    // Failure to allocate a batch while releasing memory is non-recoverable.
    // TODO(alekseys): Figure out how to do it without allocating a new batch.
    if (UNLIKELY(!b)) {
      Report("FATAL: Internal error: %s's allocator failed to allocate a "
             "transfer batch.\n", SanitizerToolName);
      Die();
    }
    b->SetFromArray(&c->batch[first_idx_to_drain], count);
    c->count -= count;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};