1 //===-- sanitizer_allocator_test.cpp --------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer/AddressSanitizer runtime.
10 // Tests for sanitizer_allocator.h.
11 //
12 //===----------------------------------------------------------------------===//
13 #include "sanitizer_common/sanitizer_allocator.h"
14 #include "sanitizer_common/sanitizer_allocator_internal.h"
15 #include "sanitizer_common/sanitizer_common.h"
16 
17 #include "sanitizer_test_utils.h"
18 #include "sanitizer_pthread_wrappers.h"
19 
20 #include "gtest/gtest.h"
21 
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <algorithm>
25 #include <vector>
26 #include <random>
27 #include <set>
28 
29 using namespace __sanitizer;
30 
31 #if SANITIZER_SOLARIS && defined(__sparcv9)
32 // FIXME: These tests probably fail because Solaris/sparcv9 uses the full
33 // 64-bit address space.  Needs more investigation.
34 #define SKIP_ON_SOLARIS_SPARCV9(x) DISABLED_##x
35 #else
36 #define SKIP_ON_SOLARIS_SPARCV9(x) x
37 #endif
38 
39 // On 64-bit systems with small virtual address spaces (e.g. 39-bit) we can't
40 // use size class maps with a large number of classes, as that will make the
41 // SizeClassAllocator64 region size too small (< 2^32).
42 #if SANITIZER_ANDROID && defined(__aarch64__)
43 #define ALLOCATOR64_SMALL_SIZE 1
44 #elif SANITIZER_RISCV64
45 #define ALLOCATOR64_SMALL_SIZE 1
46 #else
47 #define ALLOCATOR64_SMALL_SIZE 0
48 #endif
49 
50 // Too slow for debug build
51 #if !SANITIZER_DEBUG
52 
53 #if SANITIZER_CAN_USE_ALLOCATOR64
54 #if SANITIZER_WINDOWS
55 // On Windows 64-bit there is no easy way to find a large enough fixed address
56 // space that is always available. Thus, a dynamically allocated address space
57 // is used instead (i.e. ~(uptr)0).
58 static const uptr kAllocatorSpace = ~(uptr)0;
59 static const uptr kAllocatorSize  =  0x8000000000ULL;  // 512G.
60 static const u64 kAddressSpaceSize = 1ULL << 47;
61 typedef DefaultSizeClassMap SizeClassMap;
62 #elif SANITIZER_ANDROID && defined(__aarch64__)
63 static const uptr kAllocatorSpace = 0x3000000000ULL;
64 static const uptr kAllocatorSize  = 0x2000000000ULL;
65 static const u64 kAddressSpaceSize = 1ULL << 39;
66 typedef VeryCompactSizeClassMap SizeClassMap;
67 #elif SANITIZER_RISCV64
68 const uptr kAllocatorSpace = ~(uptr)0;
69 const uptr kAllocatorSize = 0x2000000000ULL;  // 128G.
70 static const u64 kAddressSpaceSize = 1ULL << 38;
71 typedef VeryDenseSizeClassMap SizeClassMap;
72 #else
73 static const uptr kAllocatorSpace = 0x700000000000ULL;
74 static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
75 static const u64 kAddressSpaceSize = 1ULL << 47;
76 typedef DefaultSizeClassMap SizeClassMap;
77 #endif
78 
79 template <typename AddressSpaceViewTy>
80 struct AP64 {  // Allocator Params. Short name for shorter demangled names.
81   static const uptr kSpaceBeg = kAllocatorSpace;
82   static const uptr kSpaceSize = kAllocatorSize;
83   static const uptr kMetadataSize = 16;
84   typedef ::SizeClassMap SizeClassMap;
85   typedef NoOpMapUnmapCallback MapUnmapCallback;
86   static const uptr kFlags = 0;
87   using AddressSpaceView = AddressSpaceViewTy;
88 };
89 
90 template <typename AddressSpaceViewTy>
91 struct AP64Dyn {
92   static const uptr kSpaceBeg = ~(uptr)0;
93   static const uptr kSpaceSize = kAllocatorSize;
94   static const uptr kMetadataSize = 16;
95   typedef ::SizeClassMap SizeClassMap;
96   typedef NoOpMapUnmapCallback MapUnmapCallback;
97   static const uptr kFlags = 0;
98   using AddressSpaceView = AddressSpaceViewTy;
99 };
100 
101 template <typename AddressSpaceViewTy>
102 struct AP64Compact {
103   static const uptr kSpaceBeg = ~(uptr)0;
104   static const uptr kSpaceSize = kAllocatorSize;
105   static const uptr kMetadataSize = 16;
106   typedef CompactSizeClassMap SizeClassMap;
107   typedef NoOpMapUnmapCallback MapUnmapCallback;
108   static const uptr kFlags = 0;
109   using AddressSpaceView = AddressSpaceViewTy;
110 };
111 
112 template <typename AddressSpaceViewTy>
113 struct AP64VeryCompact {
114   static const uptr kSpaceBeg = ~(uptr)0;
115   static const uptr kSpaceSize = 1ULL << 37;
116   static const uptr kMetadataSize = 16;
117   typedef VeryCompactSizeClassMap SizeClassMap;
118   typedef NoOpMapUnmapCallback MapUnmapCallback;
119   static const uptr kFlags = 0;
120   using AddressSpaceView = AddressSpaceViewTy;
121 };
122 
123 template <typename AddressSpaceViewTy>
124 struct AP64Dense {
125   static const uptr kSpaceBeg = kAllocatorSpace;
126   static const uptr kSpaceSize = kAllocatorSize;
127   static const uptr kMetadataSize = 16;
128   typedef DenseSizeClassMap SizeClassMap;
129   typedef NoOpMapUnmapCallback MapUnmapCallback;
130   static const uptr kFlags = 0;
131   using AddressSpaceView = AddressSpaceViewTy;
132 };
133 
134 template <typename AddressSpaceView>
135 using Allocator64ASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
136 using Allocator64 = Allocator64ASVT<LocalAddressSpaceView>;
137 
138 template <typename AddressSpaceView>
139 using Allocator64DynamicASVT = SizeClassAllocator64<AP64Dyn<AddressSpaceView>>;
140 using Allocator64Dynamic = Allocator64DynamicASVT<LocalAddressSpaceView>;
141 
142 template <typename AddressSpaceView>
143 using Allocator64CompactASVT =
144     SizeClassAllocator64<AP64Compact<AddressSpaceView>>;
145 using Allocator64Compact = Allocator64CompactASVT<LocalAddressSpaceView>;
146 
147 template <typename AddressSpaceView>
148 using Allocator64VeryCompactASVT =
149     SizeClassAllocator64<AP64VeryCompact<AddressSpaceView>>;
150 using Allocator64VeryCompact =
151     Allocator64VeryCompactASVT<LocalAddressSpaceView>;
152 
153 template <typename AddressSpaceView>
154 using Allocator64DenseASVT = SizeClassAllocator64<AP64Dense<AddressSpaceView>>;
155 using Allocator64Dense = Allocator64DenseASVT<LocalAddressSpaceView>;
156 
157 #elif defined(__mips64)
158 static const u64 kAddressSpaceSize = 1ULL << 40;
159 #elif defined(__aarch64__)
160 static const u64 kAddressSpaceSize = 1ULL << 39;
161 #elif defined(__s390x__)
162 static const u64 kAddressSpaceSize = 1ULL << 53;
163 #elif defined(__s390__)
164 static const u64 kAddressSpaceSize = 1ULL << 31;
165 #else
166 static const u64 kAddressSpaceSize = 1ULL << 32;
167 #endif
168 
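// Region size log used by the SizeClassAllocator32 tests below: 2^20 (1 MiB)
// regions on 32-bit hosts, 2^24 (16 MiB) regions on 64-bit hosts.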
169 static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
170 
171 template <typename AddressSpaceViewTy>
172 struct AP32Compact {
173   static const uptr kSpaceBeg = 0;
174   static const u64 kSpaceSize = kAddressSpaceSize;
175   static const uptr kMetadataSize = 16;
176   typedef CompactSizeClassMap SizeClassMap;
177   static const uptr kRegionSizeLog = ::kRegionSizeLog;
178   using AddressSpaceView = AddressSpaceViewTy;
179   typedef NoOpMapUnmapCallback MapUnmapCallback;
180   static const uptr kFlags = 0;
181 };
182 template <typename AddressSpaceView>
183 using Allocator32CompactASVT =
184     SizeClassAllocator32<AP32Compact<AddressSpaceView>>;
185 using Allocator32Compact = Allocator32CompactASVT<LocalAddressSpaceView>;
186 
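// Smoke test for a size class map: print it and run its internal consistency
// checks.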
187 template <class SizeClassMap>
188 void TestSizeClassMap() {
189   typedef SizeClassMap SCMap;
190   SCMap::Print();
191   SCMap::Validate();
192 }
193 
194 TEST(SanitizerCommon, DefaultSizeClassMap) {
195   TestSizeClassMap<DefaultSizeClassMap>();
196 }
197 
198 TEST(SanitizerCommon, CompactSizeClassMap) {
199   TestSizeClassMap<CompactSizeClassMap>();
200 }
201 
202 TEST(SanitizerCommon, VeryCompactSizeClassMap) {
203   TestSizeClassMap<VeryCompactSizeClassMap>();
204 }
205 
206 TEST(SanitizerCommon, InternalSizeClassMap) {
207   TestSizeClassMap<InternalSizeClassMap>();
208 }
209 
210 TEST(SanitizerCommon, DenseSizeClassMap) {
211   TestSizeClassMap<DenseSizeClassMap>();
212 }
213 
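// Generic stress test for a size-class allocator: allocates chunks of many
// different sizes, touches them and their metadata, frees everything, and
// checks that total memory usage stays stable across iterations.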
214 template <class Allocator>
215 void TestSizeClassAllocator(uptr premapped_heap = 0) {
216   Allocator *a = new Allocator;
217   a->Init(kReleaseToOSIntervalNever, premapped_heap);
218   typename Allocator::AllocatorCache cache;
219   memset(&cache, 0, sizeof(cache));
220   cache.Init(0);
221 
222   static const uptr sizes[] = {
223     1, 16,  30, 40, 100, 1000, 10000,
224     50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000
225   };
226 
227   std::vector<void *> allocated;
228 
229   uptr last_total_allocated = 0;
230   for (int i = 0; i < 3; i++) {
231     // Allocate a bunch of chunks.
232     for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
233       uptr size = sizes[s];
234       if (!a->CanAllocate(size, 1)) continue;
235       // printf("s = %ld\n", size);
236       uptr n_iter = std::max((uptr)6, 4000000 / size);
237       // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
238       for (uptr i = 0; i < n_iter; i++) {
239         uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
240         char *x = (char*)cache.Allocate(a, class_id0);
241         x[0] = 0;
242         x[size - 1] = 0;
243         x[size / 2] = 0;
244         allocated.push_back(x);
245         CHECK_EQ(x, a->GetBlockBegin(x));
246         CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
247         CHECK(a->PointerIsMine(x));
248         CHECK(a->PointerIsMine(x + size - 1));
249         CHECK(a->PointerIsMine(x + size / 2));
250         CHECK_GE(a->GetActuallyAllocatedSize(x), size);
251         uptr class_id = a->GetSizeClass(x);
252         CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
253         uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
254         metadata[0] = reinterpret_cast<uptr>(x) + 1;
255         metadata[1] = 0xABCD;
256       }
257     }
258     // Deallocate all.
259     for (uptr i = 0; i < allocated.size(); i++) {
260       void *x = allocated[i];
261       uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
262       CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
263       CHECK_EQ(metadata[1], 0xABCD);
264       cache.Deallocate(a, a->GetSizeClass(x), x);
265     }
266     allocated.clear();
267     uptr total_allocated = a->TotalMemoryUsed();
268     if (last_total_allocated == 0)
269       last_total_allocated = total_allocated;
270     CHECK_EQ(last_total_allocated, total_allocated);
271   }
272 
273   // Check that GetBlockBegin never crashes.
274   for (uptr x = 0, step = kAddressSpaceSize / 100000;
275        x < kAddressSpaceSize - step; x += step)
276     if (a->PointerIsMine(reinterpret_cast<void *>(x)))
277       Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));
278 
279   a->TestOnlyUnmap();
280   delete a;
281 }
282 
283 #if SANITIZER_CAN_USE_ALLOCATOR64
284 
285 // Maps 2 * kAllocatorSize bytes on construction so that a kAllocatorSize-
286 // aligned heap address is available, and unmaps the mapping on destruction.
287 class ScopedPremappedHeap {
288  public:
289   ScopedPremappedHeap() {
290     BasePtr = MmapNoReserveOrDie(2 * kAllocatorSize, "preallocated heap");
291     AlignedAddr = RoundUpTo(reinterpret_cast<uptr>(BasePtr), kAllocatorSize);
292   }
293 
294   ~ScopedPremappedHeap() { UnmapOrDie(BasePtr, 2 * kAllocatorSize); }
295 
296   uptr Addr() { return AlignedAddr; }
297 
298  private:
299   void *BasePtr;
300   uptr AlignedAddr;
301 };
302 
303 // These tests can fail on Windows if memory is somewhat full and lit happens
304 // to run them all at the same time. FIXME: Make them not flaky and reenable.
305 #if !SANITIZER_WINDOWS
306 TEST(SanitizerCommon, SizeClassAllocator64) {
307   TestSizeClassAllocator<Allocator64>();
308 }
309 
310 TEST(SanitizerCommon, SizeClassAllocator64Dynamic) {
311   TestSizeClassAllocator<Allocator64Dynamic>();
312 }
313 
314 #if !ALLOCATOR64_SMALL_SIZE
315 // Android (39-bit) and RISC-V (38-bit) have small address spaces, so mapping
316 // 2 * kAllocatorSize sometimes fails there.
317 TEST(SanitizerCommon, SizeClassAllocator64DynamicPremapped) {
318   ScopedPremappedHeap h;
319   TestSizeClassAllocator<Allocator64Dynamic>(h.Addr());
320 }
321 
322 TEST(SanitizerCommon, SizeClassAllocator64Compact) {
323   TestSizeClassAllocator<Allocator64Compact>();
324 }
325 
326 TEST(SanitizerCommon, SizeClassAllocator64Dense) {
327   TestSizeClassAllocator<Allocator64Dense>();
328 }
329 #endif
330 
331 TEST(SanitizerCommon, SizeClassAllocator64VeryCompact) {
332   TestSizeClassAllocator<Allocator64VeryCompact>();
333 }
334 #endif
335 #endif
336 
337 TEST(SanitizerCommon, SizeClassAllocator32Compact) {
338   TestSizeClassAllocator<Allocator32Compact>();
339 }
340 
341 template <typename AddressSpaceViewTy>
342 struct AP32SeparateBatches {
343   static const uptr kSpaceBeg = 0;
344   static const u64 kSpaceSize = kAddressSpaceSize;
345   static const uptr kMetadataSize = 16;
346   typedef DefaultSizeClassMap SizeClassMap;
347   static const uptr kRegionSizeLog = ::kRegionSizeLog;
348   using AddressSpaceView = AddressSpaceViewTy;
349   typedef NoOpMapUnmapCallback MapUnmapCallback;
350   static const uptr kFlags =
351       SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
352 };
353 template <typename AddressSpaceView>
354 using Allocator32SeparateBatchesASVT =
355     SizeClassAllocator32<AP32SeparateBatches<AddressSpaceView>>;
356 using Allocator32SeparateBatches =
357     Allocator32SeparateBatchesASVT<LocalAddressSpaceView>;
358 
359 TEST(SanitizerCommon, SizeClassAllocator32SeparateBatches) {
360   TestSizeClassAllocator<Allocator32SeparateBatches>();
361 }
362 
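// Checks that GetMetaData() keeps returning the same pointer for a chunk, no
// matter how many times it is queried.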
363 template <class Allocator>
364 void SizeClassAllocatorMetadataStress(uptr premapped_heap = 0) {
365   Allocator *a = new Allocator;
366   a->Init(kReleaseToOSIntervalNever, premapped_heap);
367   typename Allocator::AllocatorCache cache;
368   memset(&cache, 0, sizeof(cache));
369   cache.Init(0);
370 
371   const uptr kNumAllocs = 1 << 13;
372   void *allocated[kNumAllocs];
373   void *meta[kNumAllocs];
374   for (uptr i = 0; i < kNumAllocs; i++) {
375     void *x = cache.Allocate(a, 1 + i % (Allocator::kNumClasses - 1));
376     allocated[i] = x;
377     meta[i] = a->GetMetaData(x);
378   }
379   // Get Metadata kNumAllocs^2 times.
380   for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
381     uptr idx = i % kNumAllocs;
382     void *m = a->GetMetaData(allocated[idx]);
383     EXPECT_EQ(m, meta[idx]);
384   }
385   for (uptr i = 0; i < kNumAllocs; i++) {
386     cache.Deallocate(a, 1 + i % (Allocator::kNumClasses - 1), allocated[i]);
387   }
388 
389   a->TestOnlyUnmap();
390   delete a;
391 }
392 
393 #if SANITIZER_CAN_USE_ALLOCATOR64
394 // These tests can fail on Windows if memory is somewhat full and lit happens
395 // to run them all at the same time. FIXME: Make them not flaky and reenable.
396 #if !SANITIZER_WINDOWS
397 TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
398   SizeClassAllocatorMetadataStress<Allocator64>();
399 }
400 
401 TEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) {
402   SizeClassAllocatorMetadataStress<Allocator64Dynamic>();
403 }
404 
405 #if !ALLOCATOR64_SMALL_SIZE
406 TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedMetadataStress) {
407   ScopedPremappedHeap h;
408   SizeClassAllocatorMetadataStress<Allocator64Dynamic>(h.Addr());
409 }
410 
411 TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
412   SizeClassAllocatorMetadataStress<Allocator64Compact>();
413 }
414 #endif
415 
416 #endif
417 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
418 TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
419   SizeClassAllocatorMetadataStress<Allocator32Compact>();
420 }
421 
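// Allocates chunks of the largest size class until roughly TotalSize bytes are
// in use and checks that GetBlockBegin() maps each chunk back to itself.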
422 template <class Allocator>
423 void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize,
424                                            uptr premapped_heap = 0) {
425   Allocator *a = new Allocator;
426   a->Init(kReleaseToOSIntervalNever, premapped_heap);
427   typename Allocator::AllocatorCache cache;
428   memset(&cache, 0, sizeof(cache));
429   cache.Init(0);
430 
431   uptr max_size_class = Allocator::SizeClassMapT::kLargestClassID;
432   uptr size = Allocator::SizeClassMapT::Size(max_size_class);
433   // Make sure we correctly compute GetBlockBegin() w/o overflow.
434   for (size_t i = 0; i <= TotalSize / size; i++) {
435     void *x = cache.Allocate(a, max_size_class);
436     void *beg = a->GetBlockBegin(x);
437     // if ((i & (i - 1)) == 0)
438     //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
439     EXPECT_EQ(x, beg);
440   }
441 
442   a->TestOnlyUnmap();
443   delete a;
444 }
445 
446 #if SANITIZER_CAN_USE_ALLOCATOR64
447 // These tests can fail on Windows if memory is somewhat full and lit happens
448 // to run them all at the same time. FIXME: Make them not flaky and reenable.
449 #if !SANITIZER_WINDOWS
450 TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
451   SizeClassAllocatorGetBlockBeginStress<Allocator64>(
452       1ULL << (SANITIZER_ANDROID ? 31 : 33));
453 }
454 TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {
455   SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
456       1ULL << (SANITIZER_ANDROID ? 31 : 33));
457 }
458 #if !ALLOCATOR64_SMALL_SIZE
459 TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedGetBlockBegin) {
460   ScopedPremappedHeap h;
461   SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
462       1ULL << (SANITIZER_ANDROID ? 31 : 33), h.Addr());
463 }
464 TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
465   SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
466 }
467 #endif
468 TEST(SanitizerCommon, SizeClassAllocator64VeryCompactGetBlockBegin) {
469   // Does not have > 4GB for each class.
470   SizeClassAllocatorGetBlockBeginStress<Allocator64VeryCompact>(1ULL << 31);
471 }
472 TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
473   SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>(1ULL << 33);
474 }
475 #endif
476 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
477 
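// Counts map/unmap callbacks so tests can observe when the allocators actually
// map and unmap memory.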
478 struct TestMapUnmapCallback {
479   static int map_count, unmap_count;
480   void OnMap(uptr p, uptr size) const { map_count++; }
481   void OnUnmap(uptr p, uptr size) const { unmap_count++; }
482 };
483 int TestMapUnmapCallback::map_count;
484 int TestMapUnmapCallback::unmap_count;
485 
486 #if SANITIZER_CAN_USE_ALLOCATOR64
487 // These tests can fail on Windows if memory is somewhat full and lit happens
488 // to run them all at the same time. FIXME: Make them not flaky and reenable.
489 #if !SANITIZER_WINDOWS
490 
491 template <typename AddressSpaceViewTy = LocalAddressSpaceView>
492 struct AP64WithCallback {
493   static const uptr kSpaceBeg = kAllocatorSpace;
494   static const uptr kSpaceSize = kAllocatorSize;
495   static const uptr kMetadataSize = 16;
496   typedef ::SizeClassMap SizeClassMap;
497   typedef TestMapUnmapCallback MapUnmapCallback;
498   static const uptr kFlags = 0;
499   using AddressSpaceView = AddressSpaceViewTy;
500 };
501 
502 TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
503   TestMapUnmapCallback::map_count = 0;
504   TestMapUnmapCallback::unmap_count = 0;
505   typedef SizeClassAllocator64<AP64WithCallback<>> Allocator64WithCallBack;
506   Allocator64WithCallBack *a = new Allocator64WithCallBack;
507   a->Init(kReleaseToOSIntervalNever);
508   EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
509   typename Allocator64WithCallBack::AllocatorCache cache;
510   memset(&cache, 0, sizeof(cache));
511   cache.Init(0);
512   AllocatorStats stats;
513   stats.Init();
514   const size_t kNumChunks = 128;
515   uint32_t chunks[kNumChunks];
516   a->GetFromAllocator(&stats, 30, chunks, kNumChunks);
517   // State + alloc + metadata + freearray.
518   EXPECT_EQ(TestMapUnmapCallback::map_count, 4);
519   a->TestOnlyUnmap();
520   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
521   delete a;
522 }
523 #endif
524 #endif
525 
526 template <typename AddressSpaceViewTy = LocalAddressSpaceView>
527 struct AP32WithCallback {
528   static const uptr kSpaceBeg = 0;
529   static const u64 kSpaceSize = kAddressSpaceSize;
530   static const uptr kMetadataSize = 16;
531   typedef CompactSizeClassMap SizeClassMap;
532   static const uptr kRegionSizeLog = ::kRegionSizeLog;
533   using AddressSpaceView = AddressSpaceViewTy;
534   typedef TestMapUnmapCallback MapUnmapCallback;
535   static const uptr kFlags = 0;
536 };
537 
538 TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
539   TestMapUnmapCallback::map_count = 0;
540   TestMapUnmapCallback::unmap_count = 0;
541   typedef SizeClassAllocator32<AP32WithCallback<>> Allocator32WithCallBack;
542   Allocator32WithCallBack *a = new Allocator32WithCallBack;
543   a->Init(kReleaseToOSIntervalNever);
544   EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
545   Allocator32WithCallBack::AllocatorCache cache;
546   memset(&cache, 0, sizeof(cache));
547   cache.Init(0);
548   AllocatorStats stats;
549   stats.Init();
550   a->AllocateBatch(&stats, &cache, 32);
551   EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
552   a->TestOnlyUnmap();
553   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
554   delete a;
555   // fprintf(stderr, "Map: %d Unmap: %d\n",
556   //         TestMapUnmapCallback::map_count,
557   //         TestMapUnmapCallback::unmap_count);
558 }
559 
560 TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
561   TestMapUnmapCallback::map_count = 0;
562   TestMapUnmapCallback::unmap_count = 0;
563   LargeMmapAllocator<TestMapUnmapCallback> a;
564   a.Init();
565   AllocatorStats stats;
566   stats.Init();
567   void *x = a.Allocate(&stats, 1 << 20, 1);
568   EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
569   a.Deallocate(&stats, x);
570   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
571 }
572 
573 // Don't test OOM conditions on Win64 because it causes other tests on the same
574 // machine to OOM.
575 #if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64
576 TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
577   Allocator64 a;
578   a.Init(kReleaseToOSIntervalNever);
579   Allocator64::AllocatorCache cache;
580   memset(&cache, 0, sizeof(cache));
581   cache.Init(0);
582   AllocatorStats stats;
583   stats.Init();
584 
585   const size_t kNumChunks = 128;
586   uint32_t chunks[kNumChunks];
587   bool allocation_failed = false;
588   for (int i = 0; i < 1000000; i++) {
589     uptr class_id = a.kNumClasses - 1;
590     if (!a.GetFromAllocator(&stats, class_id, chunks, kNumChunks)) {
591       allocation_failed = true;
592       break;
593     }
594   }
595   EXPECT_EQ(allocation_failed, true);
596 
597   a.TestOnlyUnmap();
598 }
599 #endif
600 
601 TEST(SanitizerCommon, LargeMmapAllocator) {
602   LargeMmapAllocator<NoOpMapUnmapCallback> a;
603   a.Init();
604   AllocatorStats stats;
605   stats.Init();
606 
607   static const int kNumAllocs = 1000;
608   char *allocated[kNumAllocs];
609   static const uptr size = 4000;
610   // Allocate some.
611   for (int i = 0; i < kNumAllocs; i++) {
612     allocated[i] = (char *)a.Allocate(&stats, size, 1);
613     CHECK(a.PointerIsMine(allocated[i]));
614   }
615   // Deallocate all.
616   CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
617   for (int i = 0; i < kNumAllocs; i++) {
618     char *p = allocated[i];
619     CHECK(a.PointerIsMine(p));
620     a.Deallocate(&stats, p);
621   }
622   // Check that none are left.
623   CHECK_EQ(a.TotalMemoryUsed(), 0);
624 
625   // Allocate some more, also add metadata.
626   for (int i = 0; i < kNumAllocs; i++) {
627     char *x = (char *)a.Allocate(&stats, size, 1);
628     CHECK_GE(a.GetActuallyAllocatedSize(x), size);
629     uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
630     *meta = i;
631     allocated[i] = x;
632   }
633   for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
634     char *p = allocated[i % kNumAllocs];
635     CHECK(a.PointerIsMine(p));
636     CHECK(a.PointerIsMine(p + 2000));
637   }
638   CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
639   // Deallocate all in reverse order.
640   for (int i = 0; i < kNumAllocs; i++) {
641     int idx = kNumAllocs - i - 1;
642     char *p = allocated[idx];
643     uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
644     CHECK_EQ(*meta, idx);
645     CHECK(a.PointerIsMine(p));
646     a.Deallocate(&stats, p);
647   }
648   CHECK_EQ(a.TotalMemoryUsed(), 0);
649 
650   // Test alignments. Test with 256MB alignment on 64-bit non-Windows machines.
651   // Windows doesn't overcommit, and many machines do not have 25.6GB of swap.
652   uptr max_alignment =
653       (SANITIZER_WORDSIZE == 64 && !SANITIZER_WINDOWS) ? (1 << 28) : (1 << 24);
654   for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
655     const uptr kNumAlignedAllocs = 100;
656     for (uptr i = 0; i < kNumAlignedAllocs; i++) {
657       uptr size = ((i % 10) + 1) * 4096;
658       char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
659       CHECK_EQ(p, a.GetBlockBegin(p));
660       CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
661       CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
662       CHECK_EQ(0, (uptr)allocated[i] % alignment);
663       p[0] = p[size - 1] = 0;
664     }
665     for (uptr i = 0; i < kNumAlignedAllocs; i++) {
666       a.Deallocate(&stats, allocated[i]);
667     }
668   }
669 
670   // Regression test for boundary condition in GetBlockBegin().
671   uptr page_size = GetPageSizeCached();
672   char *p = (char *)a.Allocate(&stats, page_size, 1);
673   CHECK_EQ(p, a.GetBlockBegin(p));
674   CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
675   CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
676   a.Deallocate(&stats, p);
677 }
678 
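// Exercises the combined (primary + secondary) allocator: rejects oversized
// requests, round-trips per-chunk metadata, and verifies that ForEachChunk()
// reports every live chunk exactly once.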
679 template <class PrimaryAllocator>
680 void TestCombinedAllocator(uptr premapped_heap = 0) {
681   typedef CombinedAllocator<PrimaryAllocator> Allocator;
682   Allocator *a = new Allocator;
683   a->Init(kReleaseToOSIntervalNever, premapped_heap);
684   std::mt19937 r;
685 
686   typename Allocator::AllocatorCache cache;
687   memset(&cache, 0, sizeof(cache));
688   a->InitCache(&cache);
689 
690   EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
691   EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
692   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
693   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
694   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
695   EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
696 
697   const uptr kNumAllocs = 100000;
698   const uptr kNumIter = 10;
699   for (uptr iter = 0; iter < kNumIter; iter++) {
700     std::vector<void*> allocated;
701     for (uptr i = 0; i < kNumAllocs; i++) {
702       uptr size = (i % (1 << 14)) + 1;
703       if ((i % 1024) == 0)
704         size = 1 << (10 + (i % 14));
705       void *x = a->Allocate(&cache, size, 1);
706       uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
707       CHECK_EQ(*meta, 0);
708       *meta = size;
709       allocated.push_back(x);
710     }
711 
712     std::shuffle(allocated.begin(), allocated.end(), r);
713 
714     // Test ForEachChunk(...)
715     {
716       std::set<void *> reported_chunks;
717       auto cb = [](uptr chunk, void *arg) {
718         auto reported_chunks_ptr = reinterpret_cast<std::set<void *> *>(arg);
719         auto pair =
720             reported_chunks_ptr->insert(reinterpret_cast<void *>(chunk));
721         // Check chunk is never reported more than once.
722         ASSERT_TRUE(pair.second);
723       };
724       a->ForEachChunk(cb, reinterpret_cast<void *>(&reported_chunks));
725       for (const auto &allocated_ptr : allocated) {
726         ASSERT_NE(reported_chunks.find(allocated_ptr), reported_chunks.end());
727       }
728     }
729 
730     for (uptr i = 0; i < kNumAllocs; i++) {
731       void *x = allocated[i];
732       uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
733       CHECK_NE(*meta, 0);
734       CHECK(a->PointerIsMine(x));
735       *meta = 0;
736       a->Deallocate(&cache, x);
737     }
738     allocated.clear();
739     a->SwallowCache(&cache);
740   }
741   a->DestroyCache(&cache);
742   a->TestOnlyUnmap();
743 }
744 
745 #if SANITIZER_CAN_USE_ALLOCATOR64
746 TEST(SanitizerCommon, CombinedAllocator64) {
747   TestCombinedAllocator<Allocator64>();
748 }
749 
750 TEST(SanitizerCommon, CombinedAllocator64Dynamic) {
751   TestCombinedAllocator<Allocator64Dynamic>();
752 }
753 
754 #if !ALLOCATOR64_SMALL_SIZE
755 #if !SANITIZER_WINDOWS
756 // Windows fails to map 1TB, so disable this test.
757 TEST(SanitizerCommon, CombinedAllocator64DynamicPremapped) {
758   ScopedPremappedHeap h;
759   TestCombinedAllocator<Allocator64Dynamic>(h.Addr());
760 }
761 #endif
762 
763 TEST(SanitizerCommon, CombinedAllocator64Compact) {
764   TestCombinedAllocator<Allocator64Compact>();
765 }
766 #endif
767 
768 TEST(SanitizerCommon, CombinedAllocator64VeryCompact) {
769   TestCombinedAllocator<Allocator64VeryCompact>();
770 }
771 #endif
772 
773 TEST(SanitizerCommon, SKIP_ON_SOLARIS_SPARCV9(CombinedAllocator32Compact)) {
774   TestCombinedAllocator<Allocator32Compact>();
775 }
776 
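// Verifies that allocating and deallocating through a per-thread cache and then
// draining it leaves the allocator's total memory usage unchanged.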
777 template <class Allocator>
778 void TestSizeClassAllocatorLocalCache(uptr premapped_heap = 0) {
779   using AllocatorCache = typename Allocator::AllocatorCache;
780   AllocatorCache cache;
781   Allocator *a = new Allocator();
782 
783   a->Init(kReleaseToOSIntervalNever, premapped_heap);
784   memset(&cache, 0, sizeof(cache));
785   cache.Init(0);
786 
787   const uptr kNumAllocs = 10000;
788   const int kNumIter = 100;
789   uptr saved_total = 0;
790   for (int class_id = 1; class_id <= 5; class_id++) {
791     for (int it = 0; it < kNumIter; it++) {
792       void *allocated[kNumAllocs];
793       for (uptr i = 0; i < kNumAllocs; i++) {
794         allocated[i] = cache.Allocate(a, class_id);
795       }
796       for (uptr i = 0; i < kNumAllocs; i++) {
797         cache.Deallocate(a, class_id, allocated[i]);
798       }
799       cache.Drain(a);
800       uptr total_allocated = a->TotalMemoryUsed();
801       if (it)
802         CHECK_EQ(saved_total, total_allocated);
803       saved_total = total_allocated;
804     }
805   }
806 
807   a->TestOnlyUnmap();
808   delete a;
809 }
810 
811 #if SANITIZER_CAN_USE_ALLOCATOR64
812 // These tests can fail on Windows if memory is somewhat full and lit happens
813 // to run them all at the same time. FIXME: Make them not flaky and reenable.
814 #if !SANITIZER_WINDOWS
815 TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
816   TestSizeClassAllocatorLocalCache<Allocator64>();
817 }
818 
819 TEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) {
820   TestSizeClassAllocatorLocalCache<Allocator64Dynamic>();
821 }
822 
823 #if !ALLOCATOR64_SMALL_SIZE
824 TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedLocalCache) {
825   ScopedPremappedHeap h;
826   TestSizeClassAllocatorLocalCache<Allocator64Dynamic>(h.Addr());
827 }
828 
829 TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
830   TestSizeClassAllocatorLocalCache<Allocator64Compact>();
831 }
832 #endif
833 TEST(SanitizerCommon, SizeClassAllocator64VeryCompactLocalCache) {
834   TestSizeClassAllocatorLocalCache<Allocator64VeryCompact>();
835 }
836 #endif
837 #endif
838 
839 TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
840   TestSizeClassAllocatorLocalCache<Allocator32Compact>();
841 }
842 
843 #if SANITIZER_CAN_USE_ALLOCATOR64
844 typedef Allocator64::AllocatorCache AllocatorCache;
845 static AllocatorCache static_allocator_cache;
846 
847 void *AllocatorLeakTestWorker(void *arg) {
848   typedef AllocatorCache::Allocator Allocator;
849   Allocator *a = (Allocator*)(arg);
850   static_allocator_cache.Allocate(a, 10);
851   static_allocator_cache.Drain(a);
852   return 0;
853 }
854 
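// Each short-lived thread allocates from a static cache and drains it back; the
// allocator's total memory usage must not grow as threads come and go.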
855 TEST(SanitizerCommon, AllocatorLeakTest) {
856   typedef AllocatorCache::Allocator Allocator;
857   Allocator a;
858   a.Init(kReleaseToOSIntervalNever);
859   uptr total_used_memory = 0;
860   for (int i = 0; i < 100; i++) {
861     pthread_t t;
862     PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a);
863     PTHREAD_JOIN(t, 0);
864     if (i == 0)
865       total_used_memory = a.TotalMemoryUsed();
866     EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
867   }
868 
869   a.TestOnlyUnmap();
870 }
871 
872 // Struct which is allocated to pass info to new threads.  The new thread frees
873 // it.
874 struct NewThreadParams {
875   AllocatorCache *thread_cache;
876   AllocatorCache::Allocator *allocator;
877   uptr class_id;
878 };
879 
880 // Called in a new thread.  Just frees its argument.
881 static void *DeallocNewThreadWorker(void *arg) {
882   NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
883   params->thread_cache->Deallocate(params->allocator, params->class_id, params);
884   return NULL;
885 }
886 
887 // The allocator cache is supposed to be POD and zero initialized.  We should be
888 // able to call Deallocate on a zeroed cache, and it will self-initialize.
889 TEST(Allocator, AllocatorCacheDeallocNewThread) {
890   AllocatorCache::Allocator allocator;
891   allocator.Init(kReleaseToOSIntervalNever);
892   AllocatorCache main_cache;
893   AllocatorCache child_cache;
894   memset(&main_cache, 0, sizeof(main_cache));
895   memset(&child_cache, 0, sizeof(child_cache));
896 
897   uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
898   NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
899       main_cache.Allocate(&allocator, class_id));
900   params->thread_cache = &child_cache;
901   params->allocator = &allocator;
902   params->class_id = class_id;
903   pthread_t t;
904   PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params);
905   PTHREAD_JOIN(t, 0);
906 
907   allocator.TestOnlyUnmap();
908 }
909 #endif
910 
911 TEST(Allocator, Basic) {
912   char *p = (char*)InternalAlloc(10);
913   EXPECT_NE(p, (char*)0);
914   char *p2 = (char*)InternalAlloc(20);
915   EXPECT_NE(p2, (char*)0);
916   EXPECT_NE(p2, p);
917   InternalFree(p);
918   InternalFree(p2);
919 }
920 
921 TEST(Allocator, Stress) {
922   const int kCount = 1000;
923   char *ptrs[kCount];
924   unsigned rnd = 42;
925   for (int i = 0; i < kCount; i++) {
926     uptr sz = my_rand_r(&rnd) % 1000;
927     char *p = (char*)InternalAlloc(sz);
928     EXPECT_NE(p, (char*)0);
929     ptrs[i] = p;
930   }
931   for (int i = 0; i < kCount; i++) {
932     InternalFree(ptrs[i]);
933   }
934 }
935 
936 TEST(Allocator, LargeAlloc) {
937   void *p = InternalAlloc(10 << 20);
938   InternalFree(p);
939 }
940 
941 TEST(Allocator, ScopedBuffer) {
942   const int kSize = 512;
943   {
944     InternalMmapVector<int> int_buf(kSize);
945     EXPECT_EQ((uptr)kSize, int_buf.size());
946   }
947   InternalMmapVector<char> char_buf(kSize);
948   EXPECT_EQ((uptr)kSize, char_buf.size());
949   internal_memset(char_buf.data(), 'c', kSize);
950   for (int i = 0; i < kSize; i++) {
951     EXPECT_EQ('c', char_buf[i]);
952   }
953 }
954 
955 void IterationTestCallback(uptr chunk, void *arg) {
956   reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
957 }
958 
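// Allocates chunks from many size classes and checks that ForEachChunk() visits
// every one of them.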
959 template <class Allocator>
960 void TestSizeClassAllocatorIteration(uptr premapped_heap = 0) {
961   Allocator *a = new Allocator;
962   a->Init(kReleaseToOSIntervalNever, premapped_heap);
963   typename Allocator::AllocatorCache cache;
964   memset(&cache, 0, sizeof(cache));
965   cache.Init(0);
966 
967   static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
968     50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};
969 
970   std::vector<void *> allocated;
971 
972   // Allocate a bunch of chunks.
973   for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
974     uptr size = sizes[s];
975     if (!a->CanAllocate(size, 1)) continue;
976     // printf("s = %ld\n", size);
977     uptr n_iter = std::max((uptr)6, 80000 / size);
978     // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
979     for (uptr j = 0; j < n_iter; j++) {
980       uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
981       void *x = cache.Allocate(a, class_id0);
982       allocated.push_back(x);
983     }
984   }
985 
986   std::set<uptr> reported_chunks;
987   a->ForceLock();
988   a->ForEachChunk(IterationTestCallback, &reported_chunks);
989   a->ForceUnlock();
990 
991   for (uptr i = 0; i < allocated.size(); i++) {
992     // Don't use EXPECT_NE. Reporting the first mismatch is enough.
993     ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
994               reported_chunks.end());
995   }
996 
997   a->TestOnlyUnmap();
998   delete a;
999 }
1000 
1001 #if SANITIZER_CAN_USE_ALLOCATOR64
1002 // These tests can fail on Windows if memory is somewhat full and lit happens
1003 // to run them all at the same time. FIXME: Make them not flaky and reenable.
1004 #if !SANITIZER_WINDOWS
1005 TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
1006   TestSizeClassAllocatorIteration<Allocator64>();
1007 }
1008 TEST(SanitizerCommon, SizeClassAllocator64DynamicIteration) {
1009   TestSizeClassAllocatorIteration<Allocator64Dynamic>();
1010 }
1011 #if !ALLOCATOR64_SMALL_SIZE
1012 TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedIteration) {
1013   ScopedPremappedHeap h;
1014   TestSizeClassAllocatorIteration<Allocator64Dynamic>(h.Addr());
1015 }
1016 #endif
1017 #endif
1018 #endif
1019 
1020 TEST(SanitizerCommon, SKIP_ON_SOLARIS_SPARCV9(SizeClassAllocator32Iteration)) {
1021   TestSizeClassAllocatorIteration<Allocator32Compact>();
1022 }
1023 
1024 TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
1025   LargeMmapAllocator<NoOpMapUnmapCallback> a;
1026   a.Init();
1027   AllocatorStats stats;
1028   stats.Init();
1029 
1030   static const uptr kNumAllocs = 1000;
1031   char *allocated[kNumAllocs];
1032   static const uptr size = 40;
1033   // Allocate some.
1034   for (uptr i = 0; i < kNumAllocs; i++)
1035     allocated[i] = (char *)a.Allocate(&stats, size, 1);
1036 
1037   std::set<uptr> reported_chunks;
1038   a.ForceLock();
1039   a.ForEachChunk(IterationTestCallback, &reported_chunks);
1040   a.ForceUnlock();
1041 
1042   for (uptr i = 0; i < kNumAllocs; i++) {
1043     // Don't use EXPECT_NE. Reporting the first mismatch is enough.
1044     ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
1045               reported_chunks.end());
1046   }
1047   for (uptr i = 0; i < kNumAllocs; i++)
1048     a.Deallocate(&stats, allocated[i]);
1049 }
1050 
1051 TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
1052   LargeMmapAllocator<NoOpMapUnmapCallback> a;
1053   a.Init();
1054   AllocatorStats stats;
1055   stats.Init();
1056 
1057   static const uptr kNumAllocs = 1024;
1058   static const uptr kNumExpectedFalseLookups = 10000000;
1059   char *allocated[kNumAllocs];
1060   static const uptr size = 4096;
1061   // Allocate some.
1062   for (uptr i = 0; i < kNumAllocs; i++) {
1063     allocated[i] = (char *)a.Allocate(&stats, size, 1);
1064   }
1065 
1066   a.ForceLock();
1067   for (uptr i = 0; i < kNumAllocs  * kNumAllocs; i++) {
1068     // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
1069     char *p1 = allocated[i % kNumAllocs];
1070     EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
1071     EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
1072     EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
1073     EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
1074   }
1075 
1076   for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
1077     void *p = reinterpret_cast<void *>(i % 1024);
1078     EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
1079     p = reinterpret_cast<void *>(~0L - (i % 1024));
1080     EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
1081   }
1082   a.ForceUnlock();
1083 
1084   for (uptr i = 0; i < kNumAllocs; i++)
1085     a.Deallocate(&stats, allocated[i]);
1086 }
1087 
1088 
1089 // Don't test OOM conditions on Win64 because it causes other tests on the same
1090 // machine to OOM.
1091 #if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64
1092 typedef __sanitizer::SizeClassMap<2, 22, 22, 34, 128, 16> SpecialSizeClassMap;
1093 template <typename AddressSpaceViewTy = LocalAddressSpaceView>
1094 struct AP64_SpecialSizeClassMap {
1095   static const uptr kSpaceBeg = kAllocatorSpace;
1096   static const uptr kSpaceSize = kAllocatorSize;
1097   static const uptr kMetadataSize = 0;
1098   typedef SpecialSizeClassMap SizeClassMap;
1099   typedef NoOpMapUnmapCallback MapUnmapCallback;
1100   static const uptr kFlags = 0;
1101   using AddressSpaceView = AddressSpaceViewTy;
1102 };
1103 
1104 // Regression test for out-of-memory condition in PopulateFreeList().
1105 TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
1106   // In a world where regions are small and chunks are huge...
1107   typedef SizeClassAllocator64<AP64_SpecialSizeClassMap<>> SpecialAllocator64;
1108   const uptr kRegionSize =
1109       kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
1110   SpecialAllocator64 *a = new SpecialAllocator64;
1111   a->Init(kReleaseToOSIntervalNever);
1112   SpecialAllocator64::AllocatorCache cache;
1113   memset(&cache, 0, sizeof(cache));
1114   cache.Init(0);
1115 
1116   // ...one man is on a mission to overflow a region with a series of
1117   // successive allocations.
1118 
1119   const uptr kClassID = ALLOCATOR64_SMALL_SIZE ? 18 : 24;
1120   const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
1121   ASSERT_LT(2 * kAllocationSize, kRegionSize);
1122   ASSERT_GT(3 * kAllocationSize, kRegionSize);
1123   EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
1124   EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
1125   EXPECT_EQ(cache.Allocate(a, kClassID), nullptr);
1126 
1127   const uptr Class2 = ALLOCATOR64_SMALL_SIZE ? 15 : 21;
1128   const uptr Size2 = SpecialSizeClassMap::Size(Class2);
1129   ASSERT_EQ(Size2 * 8, kRegionSize);
1130   char *p[7];
1131   for (int i = 0; i < 7; i++) {
1132     p[i] = (char*)cache.Allocate(a, Class2);
1133     EXPECT_NE(p[i], nullptr);
1134     fprintf(stderr, "p[%d] %p s = %lx\n", i, (void*)p[i], Size2);
1135     p[i][Size2 - 1] = 42;
1136     if (i) ASSERT_LT(p[i - 1], p[i]);
1137   }
1138   EXPECT_EQ(cache.Allocate(a, Class2), nullptr);
1139   cache.Deallocate(a, Class2, p[0]);
1140   cache.Drain(a);
1141   ASSERT_EQ(p[6][Size2 - 1], 42);
1142   a->TestOnlyUnmap();
1143   delete a;
1144 }
1145 
1146 #endif
1147 
1148 #if SANITIZER_CAN_USE_ALLOCATOR64
1149 
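// Memory mapper that records the requested buffer size but never maps anything;
// used to check how much memory PackedCounterArray would request.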
1150 class NoMemoryMapper {
1151  public:
1152   uptr last_request_buffer_size;
1153 
1154   NoMemoryMapper() : last_request_buffer_size(0) {}
1155 
1156   void *MapPackedCounterArrayBuffer(uptr buffer_size) {
1157     last_request_buffer_size = buffer_size;
1158     return nullptr;
1159   }
1160   void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {}
1161 };
1162 
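// Hands out a single page (zeroed on each request) surrounded by inaccessible
// guard pages so that out-of-bounds accesses from PackedCounterArray trap
// immediately.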
1163 class RedZoneMemoryMapper {
1164  public:
1165   RedZoneMemoryMapper() {
1166     const auto page_size = GetPageSize();
1167     buffer = MmapOrDie(3ULL * page_size, "");
1168     MprotectNoAccess(reinterpret_cast<uptr>(buffer), page_size);
1169     MprotectNoAccess(reinterpret_cast<uptr>(buffer) + page_size * 2, page_size);
1170   }
1171   ~RedZoneMemoryMapper() {
1172     UnmapOrDie(buffer, 3 * GetPageSize());
1173   }
1174 
1175   void *MapPackedCounterArrayBuffer(uptr buffer_size) {
1176     const auto page_size = GetPageSize();
1177     CHECK_EQ(buffer_size, page_size);
1178     void *p =
1179         reinterpret_cast<void *>(reinterpret_cast<uptr>(buffer) + page_size);
1180     memset(p, 0, page_size);
1181     return p;
1182   }
1183   void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {}
1184 
1185  private:
1186   void *buffer;
1187 };
1188 
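// PackedCounterArray packs each counter into the smallest power-of-two number
// of bits that fits its maximum value; the checks below verify the minimal
// 8-byte buffer case as well as the resulting packing ratio.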
1189 TEST(SanitizerCommon, SizeClassAllocator64PackedCounterArray) {
1190   NoMemoryMapper no_memory_mapper;
1191   typedef Allocator64::PackedCounterArray<NoMemoryMapper>
1192       NoMemoryPackedCounterArray;
1193 
1194   for (int i = 0; i < 64; i++) {
1195     // Various valid counter max values packed into one word.
1196     NoMemoryPackedCounterArray counters_2n(1, 1ULL << i, &no_memory_mapper);
1197     EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);
1198 
1199     // Check the "all bit set" values too.
1200     NoMemoryPackedCounterArray counters_2n1_1(1, ~0ULL >> i, &no_memory_mapper);
1201     EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);
1202 
1203     // Verify the packing ratio; each counter is expected to be packed into the
1204     // closest power-of-two number of bits.
1205     NoMemoryPackedCounterArray counters(64, 1ULL << i, &no_memory_mapper);
1206     EXPECT_EQ(8ULL * RoundUpToPowerOfTwo(i + 1),
1207               no_memory_mapper.last_request_buffer_size);
1208   }
1209 
1210   RedZoneMemoryMapper memory_mapper;
1211   typedef Allocator64::PackedCounterArray<RedZoneMemoryMapper>
1212       RedZonePackedCounterArray;
1213   // Go through 1, 2, 4, 8, .. 64 bits per counter.
1214   for (int i = 0; i < 7; i++) {
1215     // Make sure counters request one memory page for the buffer.
1216     const u64 kNumCounters = (GetPageSize() / 8) * (64 >> i);
1217     RedZonePackedCounterArray counters(kNumCounters,
1218                                        1ULL << ((1 << i) - 1),
1219                                        &memory_mapper);
1220     counters.Inc(0);
1221     for (u64 c = 1; c < kNumCounters - 1; c++) {
1222       ASSERT_EQ(0ULL, counters.Get(c));
1223       counters.Inc(c);
1224       ASSERT_EQ(1ULL, counters.Get(c - 1));
1225     }
1226     ASSERT_EQ(0ULL, counters.Get(kNumCounters - 1));
1227     counters.Inc(kNumCounters - 1);
1228 
1229     if (i > 0) {
1230       counters.IncRange(0, kNumCounters - 1);
1231       for (u64 c = 0; c < kNumCounters; c++)
1232         ASSERT_EQ(2ULL, counters.Get(c));
1233     }
1234   }
1235 }
1236 
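// Records released page ranges as a string in which '.' marks a page that was
// kept and 'x' marks a page that was released, matching the test cases below.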
1237 class RangeRecorder {
1238  public:
1239   std::string reported_pages;
1240 
1241   RangeRecorder()
1242       : page_size_scaled_log(
1243             Log2(GetPageSizeCached() >> Allocator64::kCompactPtrScale)),
1244         last_page_reported(0) {}
1245 
1246   void ReleasePageRangeToOS(u32 class_id, u32 from, u32 to) {
1247     from >>= page_size_scaled_log;
1248     to >>= page_size_scaled_log;
1249     ASSERT_LT(from, to);
1250     if (!reported_pages.empty())
1251       ASSERT_LT(last_page_reported, from);
1252     reported_pages.append(from - last_page_reported, '.');
1253     reported_pages.append(to - from, 'x');
1254     last_page_reported = to;
1255   }
1256 
1257  private:
1258   const uptr page_size_scaled_log;
1259   u32 last_page_reported;
1260 };
1261 
1262 TEST(SanitizerCommon, SizeClassAllocator64FreePagesRangeTracker) {
1263   typedef Allocator64::FreePagesRangeTracker<RangeRecorder> RangeTracker;
1264 
1265   // 'x' denotes a page to be released, '.' denotes a page to be kept around.
1266   const char* test_cases[] = {
1267       "",
1268       ".",
1269       "x",
1270       "........",
1271       "xxxxxxxxxxx",
1272       "..............xxxxx",
1273       "xxxxxxxxxxxxxxxxxx.....",
1274       "......xxxxxxxx........",
1275       "xxx..........xxxxxxxxxxxxxxx",
1276       "......xxxx....xxxx........",
1277       "xxx..........xxxxxxxx....xxxxxxx",
1278       "x.x.x.x.x.x.x.x.x.x.x.x.",
1279       ".x.x.x.x.x.x.x.x.x.x.x.x",
1280       ".x.x.x.x.x.x.x.x.x.x.x.x.",
1281       "x.x.x.x.x.x.x.x.x.x.x.x.x",
1282   };
1283 
1284   for (auto test_case : test_cases) {
1285     RangeRecorder range_recorder;
1286     RangeTracker tracker(&range_recorder, 1);
1287     for (int i = 0; test_case[i] != 0; i++)
1288       tracker.NextPage(test_case[i] == 'x');
1289     tracker.Done();
1290     // Strip trailing '.'-pages before comparing the results as they are not
1291     // going to be reported to range_recorder anyway.
1292     const char* last_x = strrchr(test_case, 'x');
1293     std::string expected(
1294         test_case,
1295         last_x == nullptr ? 0 : (last_x - test_case + 1));
1296     EXPECT_STREQ(expected.c_str(), range_recorder.reported_pages.c_str());
1297   }
1298 }
1299 
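// Collects the scaled offsets of every page reported as released so the test
// can verify exactly which pages were returned to the OS.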
1300 class ReleasedPagesTrackingMemoryMapper {
1301  public:
1302   std::set<u32> reported_pages;
1303 
1304   void *MapPackedCounterArrayBuffer(uptr buffer_size) {
1305     reported_pages.clear();
1306     return calloc(1, buffer_size);
1307   }
1308   void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {
1309     free(buffer);
1310   }
1311 
1312   void ReleasePageRangeToOS(u32 class_id, u32 from, u32 to) {
1313     uptr page_size_scaled =
1314         GetPageSizeCached() >> Allocator64::kCompactPtrScale;
1315     for (u32 i = from; i < to; i += page_size_scaled)
1316       reported_pages.insert(i);
1317   }
1318 };
1319 
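// For each size class: build a random free list, run ReleaseFreeMemoryToOS(),
// and cross-check the released pages against a straightforward walk over the
// free and used chunk ranges.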
1320 template <class Allocator>
1321 void TestReleaseFreeMemoryToOS() {
1322   ReleasedPagesTrackingMemoryMapper memory_mapper;
1323   const uptr kAllocatedPagesCount = 1024;
1324   const uptr page_size = GetPageSizeCached();
1325   const uptr page_size_scaled = page_size >> Allocator::kCompactPtrScale;
1326   std::mt19937 r;
1327   uint32_t rnd_state = 42;
1328 
1329   for (uptr class_id = 1; class_id <= Allocator::SizeClassMapT::kLargestClassID;
1330       class_id++) {
1331     const uptr chunk_size = Allocator::SizeClassMapT::Size(class_id);
1332     const uptr chunk_size_scaled = chunk_size >> Allocator::kCompactPtrScale;
1333     const uptr max_chunks =
1334         kAllocatedPagesCount * GetPageSizeCached() / chunk_size;
1335 
1336     // Generate the random free list.
1337     std::vector<u32> free_array;
1338     bool in_free_range = false;
1339     uptr current_range_end = 0;
1340     for (uptr i = 0; i < max_chunks; i++) {
1341       if (i == current_range_end) {
1342         in_free_range = (my_rand_r(&rnd_state) & 1U) == 1;
1343         current_range_end += my_rand_r(&rnd_state) % 100 + 1;
1344       }
1345       if (in_free_range)
1346         free_array.push_back(i * chunk_size_scaled);
1347     }
1348     if (free_array.empty())
1349       continue;
1350     // Shuffle free_array to verify that ReleaseFreeMemoryToOS does not depend
1351     // on the list ordering.
1352     std::shuffle(free_array.begin(), free_array.end(), r);
1353 
1354     Allocator::ReleaseFreeMemoryToOS(&free_array[0], free_array.size(),
1355                                      chunk_size, kAllocatedPagesCount,
1356                                      &memory_mapper, class_id);
1357 
1358     // Verify that no released page is touched by a used chunk, and that every
1359     // range of free chunks large enough to cover entire memory pages had those
1360     // pages released.
1361     uptr verified_released_pages = 0;
1362     std::set<u32> free_chunks(free_array.begin(), free_array.end());
1363 
1364     u32 current_chunk = 0;
1365     in_free_range = false;
1366     u32 current_free_range_start = 0;
1367     for (uptr i = 0; i <= max_chunks; i++) {
1368       bool is_free_chunk = free_chunks.find(current_chunk) != free_chunks.end();
1369 
1370       if (is_free_chunk) {
1371         if (!in_free_range) {
1372           in_free_range = true;
1373           current_free_range_start = current_chunk;
1374         }
1375       } else {
1376         // Verify that this used chunk does not touch any released page.
1377         for (uptr i_page = current_chunk / page_size_scaled;
1378              i_page <= (current_chunk + chunk_size_scaled - 1) /
1379                        page_size_scaled;
1380              i_page++) {
1381           bool page_released =
1382               memory_mapper.reported_pages.find(i_page * page_size_scaled) !=
1383               memory_mapper.reported_pages.end();
1384           ASSERT_EQ(false, page_released);
1385         }
1386 
1387         if (in_free_range) {
1388           in_free_range = false;
1389           // Verify that all entire memory pages covered by this range of free
1390           // chunks were released.
1391           u32 page = RoundUpTo(current_free_range_start, page_size_scaled);
1392           while (page + page_size_scaled <= current_chunk) {
1393             bool page_released =
1394                 memory_mapper.reported_pages.find(page) !=
1395                 memory_mapper.reported_pages.end();
1396             ASSERT_EQ(true, page_released);
1397             verified_released_pages++;
1398             page += page_size_scaled;
1399           }
1400         }
1401       }
1402 
1403       current_chunk += chunk_size_scaled;
1404     }
1405 
1406     ASSERT_EQ(memory_mapper.reported_pages.size(), verified_released_pages);
1407   }
1408 }
1409 
1410 TEST(SanitizerCommon, SizeClassAllocator64ReleaseFreeMemoryToOS) {
1411   TestReleaseFreeMemoryToOS<Allocator64>();
1412 }
1413 
1414 #if !ALLOCATOR64_SMALL_SIZE
1415 TEST(SanitizerCommon, SizeClassAllocator64CompactReleaseFreeMemoryToOS) {
1416   TestReleaseFreeMemoryToOS<Allocator64Compact>();
1417 }
1418 
1419 TEST(SanitizerCommon, SizeClassAllocator64VeryCompactReleaseFreeMemoryToOS) {
1420   TestReleaseFreeMemoryToOS<Allocator64VeryCompact>();
1421 }
1422 #endif  // !ALLOCATOR64_SMALL_SIZE
1423 
1424 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
1425 
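// Sets every 7th cell of the two-level byte map and verifies that all other
// cells read back as 0.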
1426 TEST(SanitizerCommon, TwoLevelByteMap) {
1427   const u64 kSize1 = 1 << 6, kSize2 = 1 << 12;
1428   const u64 n = kSize1 * kSize2;
1429   TwoLevelByteMap<kSize1, kSize2> m;
1430   m.Init();
1431   for (u64 i = 0; i < n; i += 7) {
1432     m.set(i, (i % 100) + 1);
1433   }
1434   for (u64 j = 0; j < n; j++) {
1435     if (j % 7)
1436       EXPECT_EQ(m[j], 0);
1437     else
1438       EXPECT_EQ(m[j], (j % 100) + 1);
1439   }
1440 
1441   m.TestOnlyUnmap();
1442 }
1443 
1444 template <typename AddressSpaceView>
1445 using TestByteMapASVT =
1446     TwoLevelByteMap<1 << 12, 1 << 13, AddressSpaceView, TestMapUnmapCallback>;
1447 using TestByteMap = TestByteMapASVT<LocalAddressSpaceView>;
1448 
1449 struct TestByteMapParam {
1450   TestByteMap *m;
1451   size_t shard;
1452   size_t num_shards;
1453 };
1454 
1455 void *TwoLevelByteMapUserThread(void *param) {
1456   TestByteMapParam *p = (TestByteMapParam*)param;
1457   for (size_t i = p->shard; i < p->m->size(); i += p->num_shards) {
1458     size_t val = (i % 100) + 1;
1459     p->m->set(i, val);
1460     EXPECT_EQ((*p->m)[i], val);
1461   }
1462   return 0;
1463 }
1464 
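// Several threads concurrently fill disjoint shards of the byte map; every
// level-2 table should end up mapped exactly once (map_count == size1()).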
1465 TEST(SanitizerCommon, ThreadedTwoLevelByteMap) {
1466   TestByteMap m;
1467   m.Init();
1468   TestMapUnmapCallback::map_count = 0;
1469   TestMapUnmapCallback::unmap_count = 0;
1470   static const int kNumThreads = 4;
1471   pthread_t t[kNumThreads];
1472   TestByteMapParam p[kNumThreads];
1473   for (int i = 0; i < kNumThreads; i++) {
1474     p[i].m = &m;
1475     p[i].shard = i;
1476     p[i].num_shards = kNumThreads;
1477     PTHREAD_CREATE(&t[i], 0, TwoLevelByteMapUserThread, &p[i]);
1478   }
1479   for (int i = 0; i < kNumThreads; i++) {
1480     PTHREAD_JOIN(t[i], 0);
1481   }
1482   EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
1483   EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, 0UL);
1484   m.TestOnlyUnmap();
1485   EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
1486   EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, m.size1());
1487 }
1488 
1489 TEST(SanitizerCommon, LowLevelAllocatorShouldRoundUpSizeOnAlloc) {
1490   // When allocating a memory block slightly bigger than a memory page,
1491   // LowLevelAllocator calls MmapOrDie for the internal buffer and should round
1492   // the size up to the page size, so that subsequent calls to the allocator
1493   // can use the remaining space in the last allocated page.
1494   static LowLevelAllocator allocator;
1495   char *ptr1 = (char *)allocator.Allocate(GetPageSizeCached() + 16);
1496   char *ptr2 = (char *)allocator.Allocate(16);
1497   EXPECT_EQ(ptr2, ptr1 + GetPageSizeCached() + 16);
1498 }
1499 
1500 #endif  // #if !SANITIZER_DEBUG
1501