//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
// efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
//                                                             kRegionSize).
// Since the regions are aligned to kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space, so we keep
// a ByteMap possible_regions to store the size class of each Region.
// A size class of 0 means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing, the objects of this class should be
// cache-line aligned.
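//
// The allocator is configured through a single Params struct. A minimal
// sketch of such a struct (purely illustrative; the real definitions live in
// the tools that instantiate this allocator, and it assumes the
// DefaultSizeClassMap, FlatByteMap and NoOpMapUnmapCallback helpers defined
// elsewhere in sanitizer_allocator.h) might look like:
//
//   struct ExampleAP32 {  // hypothetical name, for illustration only
//     static const uptr kSpaceBeg = 0;
//     static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
//     static const uptr kMetadataSize = 16;
//     typedef DefaultSizeClassMap SizeClassMap;
//     static const uptr kRegionSizeLog = 20;  // 1 MiB regions.
//     typedef FlatByteMap<kSpaceSize / (1ULL << kRegionSizeLog)> ByteMap;
//     typedef NoOpMapUnmapCallback MapUnmapCallback;
//     static const uptr kFlags = 0;
//   };
//   typedef SizeClassAllocator32<ExampleAP32> ExamplePrimaryAllocator;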

struct SizeClassAllocator32FlagMasks {  //  Bit masks.
  enum {
    kRandomShuffleChunks = 1,
    kUseSeparateSizeClassForBatch = 2,
  };
};

template <class Params>
class SizeClassAllocator32 {
 public:
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const u64 kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = Params::kRegionSizeLog;
  typedef typename Params::ByteMap ByteMap;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;

  COMPILER_CHECK(!SANITIZER_SIGN_EXTENDED_ADDRESSES ||
                 (kSpaceSize & (kSpaceSize - 1)) == 0);

  static const bool kRandomShuffleChunks = Params::kFlags &
      SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
  static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;

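  // A TransferBatch is a fixed-size array of chunk pointers that is moved as
  // a unit between the central per-class free list below and the per-thread
  // SizeClassAllocator32LocalCache; batches are linked through `next` into an
  // IntrusiveList.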
  struct TransferBatch {
    static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
    void SetFromArray(void *batch[], uptr count) {
      DCHECK_LE(count, kMaxNumCached);
      count_ = count;
      for (uptr i = 0; i < count; i++)
        batch_[i] = batch[i];
    }
    uptr Count() const { return count_; }
    void Clear() { count_ = 0; }
    void Add(void *ptr) {
      batch_[count_++] = ptr;
      DCHECK_LE(count_, kMaxNumCached);
    }
    void CopyToArray(void *to_batch[]) const {
      for (uptr i = 0, n = Count(); i < n; i++)
        to_batch[i] = batch_[i];
    }

    // How much memory do we need for a batch containing n elements.
    static uptr AllocationSizeRequiredForNElements(uptr n) {
      return sizeof(uptr) * 2 + sizeof(void *) * n;
    }
    static uptr MaxCached(uptr size) {
      return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size));
    }

    TransferBatch *next;

   private:
    uptr count_;
    void *batch_[kMaxNumCached];
  };

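  // sizeof(TransferBatch) is exactly kMaxNumCachedHint words: count_ and next
  // occupy two words, and batch_ holds kMaxNumCachedHint - 2 pointers. The
  // checks below verify this, and that the size is a power of two.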
  static const uptr kBatchSize = sizeof(TransferBatch);
  COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
  COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));

  static uptr ClassIdToSize(uptr class_id) {
    return (class_id == SizeClassMap::kBatchClassID) ?
        kBatchSize : SizeClassMap::Size(class_id);
  }

  typedef SizeClassAllocator32<Params> ThisT;
  typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;

  void Init(s32 release_to_os_interval_ms) {
    possible_regions.Init();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  s32 ReleaseToOSIntervalMs() const {
    return kReleaseToOSIntervalNever;
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    // This is empty here. Currently only implemented in 64-bit allocator.
  }

  void ForceReleaseToOS() {
    // Currently implemented in 64-bit allocator only.
  }

  void *MapWithCallback(uptr size) {
    void *res = MmapOrDie(size, PrimaryAllocatorName);
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

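  // Chunk metadata is stored at the top of the owning region, growing
  // downwards: the metadata of chunk n starts at
  // (region_beg + kRegionSize) - (n + 1) * kMetadataSize.
  // Illustrative example (assuming kRegionSizeLog == 20 and
  // kMetadataSize == 16): for a 64-byte size class, the chunk at region
  // offset 640 is chunk n == 10, so its metadata starts 11 * 16 == 176
  // bytes below the end of the region.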
  void *GetMetaData(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }

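  // Grabs one TransferBatch of chunks of class class_id for the local cache
  // c, refilling the per-class free list from a freshly mapped region when it
  // is empty; returns nullptr if the refill fails.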
  NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                        uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty()) {
      if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
        return nullptr;
      DCHECK(!sci->free_list.empty());
    }
    TransferBatch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
                                TransferBatch *b) {
    DCHECK_LT(class_id, kNumClasses);
    CHECK_GT(b->Count(), 0);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    sci->free_list.push_front(b);
  }

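  // On targets where SANITIZER_SIGN_EXTENDED_ADDRESSES is set, user pointers
  // may carry sign-extension bits; mask them off before the range check. The
  // COMPILER_CHECK near the top of the class guarantees kSpaceSize is a power
  // of two in that configuration, so the mask is valid.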
  bool PointerIsMine(const void *p) {
    uptr mem = reinterpret_cast<uptr>(p);
    if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
      mem &= (kSpaceSize - 1);
    if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
      return false;
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = ClassIdToSize(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }

  void PrintStats() {}

  static uptr AdditionalSize() { return 0; }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
    StaticSpinMutex mutex;
    IntrusiveList<TransferBatch> free_list;
    u32 rand_state;
  };
#if _LP64
  COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);
#endif

  uptr ComputeRegionId(uptr mem) {
    if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
      mem &= (kSpaceSize - 1);
    const uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
        kRegionSize, kRegionSize, PrimaryAllocatorName));
    if (UNLIKELY(!res))
      return 0;
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK(IsAligned(res, kRegionSize));
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

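  // Packs the chunk addresses in pointers_array into TransferBatches of up to
  // max_count chunks each: full batches are pushed onto sci->free_list, while
  // a partially filled one is handed back through *current_batch. Returns
  // false if a batch header cannot be allocated.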
  bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
                       TransferBatch **current_batch, uptr max_count,
                       uptr *pointers_array, uptr count) {
    // If using a separate class for batches, we do not need to shuffle it.
    if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
        class_id != SizeClassMap::kBatchClassID))
      RandomShuffle(pointers_array, count, &sci->rand_state);
    TransferBatch *b = *current_batch;
    for (uptr i = 0; i < count; i++) {
      if (!b) {
        b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
        if (UNLIKELY(!b))
          return false;
        b->Clear();
      }
      b->Add((void*)pointers_array[i]);
      if (b->Count() == max_count) {
        sci->free_list.push_back(b);
        b = nullptr;
      }
    }
    *current_batch = b;
    return true;
  }

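  // Maps a fresh region for class_id, carves it into chunks, shuffles their
  // addresses in blocks of kShuffleArraySize when kRandomShuffleChunks is
  // set, and pushes the resulting TransferBatches onto sci->free_list.
  // Returns false if the region or a batch could not be allocated.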
  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    const uptr region = AllocateRegion(stat, class_id);
    if (UNLIKELY(!region))
      return false;
    if (kRandomShuffleChunks)
      if (UNLIKELY(sci->rand_state == 0))
        // The random state is initialized from ASLR (PIE) and time.
        sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
    const uptr size = ClassIdToSize(class_id);
    const uptr n_chunks = kRegionSize / (size + kMetadataSize);
    const uptr max_count = TransferBatch::MaxCached(size);
    DCHECK_GT(max_count, 0);
    TransferBatch *b = nullptr;
    constexpr uptr kShuffleArraySize = 48;
    uptr shuffle_array[kShuffleArraySize];
    uptr count = 0;
    for (uptr i = region; i < region + n_chunks * size; i += size) {
      shuffle_array[count++] = i;
      if (count == kShuffleArraySize) {
        if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                      shuffle_array, count)))
          return false;
        count = 0;
      }
    }
    if (count) {
      if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                    shuffle_array, count)))
        return false;
    }
    if (b) {
      CHECK_GT(b->Count(), 0);
      sci->free_list.push_back(b);
    }
    return true;
  }

  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};