1 //===-- sanitizer_allocator_test.cpp --------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer/AddressSanitizer runtime.
10 // Tests for sanitizer_allocator.h.
11 //
12 //===----------------------------------------------------------------------===//
13 #include "sanitizer_common/sanitizer_allocator.h"
14 
15 #include <stdio.h>
16 #include <stdlib.h>
17 
18 #include <algorithm>
19 #include <random>
20 #include <set>
21 #include <vector>
22 
23 #include "gtest/gtest.h"
24 #include "sanitizer_common/sanitizer_allocator_internal.h"
25 #include "sanitizer_common/sanitizer_common.h"
26 #include "sanitizer_pthread_wrappers.h"
27 #include "sanitizer_test_utils.h"
28 
29 using namespace __sanitizer;
30 
31 #if SANITIZER_SOLARIS && defined(__sparcv9)
32 // FIXME: These tests probably fail because Solaris/sparcv9 uses the full
33 // 64-bit address space.  Needs more investigation.
34 #define SKIP_ON_SOLARIS_SPARCV9(x) DISABLED_##x
35 #else
36 #define SKIP_ON_SOLARIS_SPARCV9(x) x
37 #endif
38 
39 // On 64-bit systems with small virtual address spaces (e.g. 39-bit) we can't
40 // use size class maps with a large number of classes, as that will make the
41 // SizeClassAllocator64 region size too small (< 2^32).
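// For example, with a 2^37-byte primary space (the Android configuration just
// below), more than 32 size-class regions already pushes each region below
// 2^32 bytes, since 2^37 / 32 = 2^32.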
42 #if SANITIZER_ANDROID && defined(__aarch64__)
43 #define ALLOCATOR64_SMALL_SIZE 1
44 #elif SANITIZER_RISCV64
45 #define ALLOCATOR64_SMALL_SIZE 1
46 #else
47 #define ALLOCATOR64_SMALL_SIZE 0
48 #endif
49 
50 // Too slow for debug build
51 #if !SANITIZER_DEBUG
52 
53 #if SANITIZER_CAN_USE_ALLOCATOR64
54 #if SANITIZER_WINDOWS
55 // On Windows 64-bit there is no easy way to find a large enough fixed address
56 // space that is always available. Thus, a dynamically allocated address space
57 // is used instead (i.e. ~(uptr)0).
58 static const uptr kAllocatorSpace = ~(uptr)0;
59 static const uptr kAllocatorSize  =  0x8000000000ULL;  // 512G
60 static const u64 kAddressSpaceSize = 1ULL << 47;
61 typedef DefaultSizeClassMap SizeClassMap;
62 #elif SANITIZER_ANDROID && defined(__aarch64__)
63 static const uptr kAllocatorSpace = 0x3000000000ULL;
64 static const uptr kAllocatorSize  = 0x2000000000ULL;
65 static const u64 kAddressSpaceSize = 1ULL << 39;
66 typedef VeryCompactSizeClassMap SizeClassMap;
67 #elif SANITIZER_RISCV64
68 const uptr kAllocatorSpace = ~(uptr)0;
69 const uptr kAllocatorSize = 0x2000000000ULL;  // 128G.
70 static const u64 kAddressSpaceSize = 1ULL << 38;
71 typedef VeryDenseSizeClassMap SizeClassMap;
72 #else
73 static const uptr kAllocatorSpace = 0x700000000000ULL;
74 static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
75 static const u64 kAddressSpaceSize = 1ULL << 47;
76 typedef DefaultSizeClassMap SizeClassMap;
77 #endif
78 
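// The AP64* structs below are the compile-time parameter packs consumed by
// SizeClassAllocator64: kSpaceBeg is the base of the primary region (~(uptr)0
// requests a dynamically chosen base), kSpaceSize is its size, kMetadataSize
// is the per-chunk metadata size, and MapUnmapCallback receives OnMap/OnUnmap
// notifications. A minimal usage sketch, mirroring the aliases further down:
//   using A64 = SizeClassAllocator64<AP64<LocalAddressSpaceView>>;
//   A64 *a = new A64;
//   a->Init(kReleaseToOSIntervalNever);
//   // ... allocate through an A64::AllocatorCache ...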
79 template <typename AddressSpaceViewTy>
80 struct AP64 {  // Allocator Params. Short name for shorter demangled names.
81   static const uptr kSpaceBeg = kAllocatorSpace;
82   static const uptr kSpaceSize = kAllocatorSize;
83   static const uptr kMetadataSize = 16;
84   typedef ::SizeClassMap SizeClassMap;
85   typedef NoOpMapUnmapCallback MapUnmapCallback;
86   static const uptr kFlags = 0;
87   using AddressSpaceView = AddressSpaceViewTy;
88 };
89 
90 template <typename AddressSpaceViewTy>
91 struct AP64Dyn {
92   static const uptr kSpaceBeg = ~(uptr)0;
93   static const uptr kSpaceSize = kAllocatorSize;
94   static const uptr kMetadataSize = 16;
95   typedef ::SizeClassMap SizeClassMap;
96   typedef NoOpMapUnmapCallback MapUnmapCallback;
97   static const uptr kFlags = 0;
98   using AddressSpaceView = AddressSpaceViewTy;
99 };
100 
101 template <typename AddressSpaceViewTy>
102 struct AP64Compact {
103   static const uptr kSpaceBeg = ~(uptr)0;
104   static const uptr kSpaceSize = kAllocatorSize;
105   static const uptr kMetadataSize = 16;
106   typedef CompactSizeClassMap SizeClassMap;
107   typedef NoOpMapUnmapCallback MapUnmapCallback;
108   static const uptr kFlags = 0;
109   using AddressSpaceView = AddressSpaceViewTy;
110 };
111 
112 template <typename AddressSpaceViewTy>
113 struct AP64VeryCompact {
114   static const uptr kSpaceBeg = ~(uptr)0;
115   static const uptr kSpaceSize = 1ULL << 37;
116   static const uptr kMetadataSize = 16;
117   typedef VeryCompactSizeClassMap SizeClassMap;
118   typedef NoOpMapUnmapCallback MapUnmapCallback;
119   static const uptr kFlags = 0;
120   using AddressSpaceView = AddressSpaceViewTy;
121 };
122 
123 template <typename AddressSpaceViewTy>
124 struct AP64Dense {
125   static const uptr kSpaceBeg = kAllocatorSpace;
126   static const uptr kSpaceSize = kAllocatorSize;
127   static const uptr kMetadataSize = 16;
128   typedef DenseSizeClassMap SizeClassMap;
129   typedef NoOpMapUnmapCallback MapUnmapCallback;
130   static const uptr kFlags = 0;
131   using AddressSpaceView = AddressSpaceViewTy;
132 };
133 
134 template <typename AddressSpaceView>
135 using Allocator64ASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
136 using Allocator64 = Allocator64ASVT<LocalAddressSpaceView>;
137 
138 template <typename AddressSpaceView>
139 using Allocator64DynamicASVT = SizeClassAllocator64<AP64Dyn<AddressSpaceView>>;
140 using Allocator64Dynamic = Allocator64DynamicASVT<LocalAddressSpaceView>;
141 
142 template <typename AddressSpaceView>
143 using Allocator64CompactASVT =
144     SizeClassAllocator64<AP64Compact<AddressSpaceView>>;
145 using Allocator64Compact = Allocator64CompactASVT<LocalAddressSpaceView>;
146 
147 template <typename AddressSpaceView>
148 using Allocator64VeryCompactASVT =
149     SizeClassAllocator64<AP64VeryCompact<AddressSpaceView>>;
150 using Allocator64VeryCompact =
151     Allocator64VeryCompactASVT<LocalAddressSpaceView>;
152 
153 template <typename AddressSpaceView>
154 using Allocator64DenseASVT = SizeClassAllocator64<AP64Dense<AddressSpaceView>>;
155 using Allocator64Dense = Allocator64DenseASVT<LocalAddressSpaceView>;
156 
157 #elif defined(__mips64)
158 static const u64 kAddressSpaceSize = 1ULL << 40;
159 #elif defined(__aarch64__)
160 static const u64 kAddressSpaceSize = 1ULL << 39;
161 #elif defined(__s390x__)
162 static const u64 kAddressSpaceSize = 1ULL << 53;
163 #elif defined(__s390__)
164 static const u64 kAddressSpaceSize = 1ULL << 31;
165 #else
166 static const u64 kAddressSpaceSize = 1ULL << 32;
167 #endif
168 
169 static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
170 
171 template <typename AddressSpaceViewTy>
172 struct AP32Compact {
173   static const uptr kSpaceBeg = 0;
174   static const u64 kSpaceSize = kAddressSpaceSize;
175   static const uptr kMetadataSize = 16;
176   typedef CompactSizeClassMap SizeClassMap;
177   static const uptr kRegionSizeLog = ::kRegionSizeLog;
178   using AddressSpaceView = AddressSpaceViewTy;
179   typedef NoOpMapUnmapCallback MapUnmapCallback;
180   static const uptr kFlags = 0;
181 };
182 template <typename AddressSpaceView>
183 using Allocator32CompactASVT =
184     SizeClassAllocator32<AP32Compact<AddressSpaceView>>;
185 using Allocator32Compact = Allocator32CompactASVT<LocalAddressSpaceView>;
186 
187 template <class SizeClassMap>
188 void TestSizeClassMap() {
189   typedef SizeClassMap SCMap;
190   SCMap::Print();
191   SCMap::Validate();
192 }
193 
194 TEST(SanitizerCommon, DefaultSizeClassMap) {
195   TestSizeClassMap<DefaultSizeClassMap>();
196 }
197 
198 TEST(SanitizerCommon, CompactSizeClassMap) {
199   TestSizeClassMap<CompactSizeClassMap>();
200 }
201 
202 TEST(SanitizerCommon, VeryCompactSizeClassMap) {
203   TestSizeClassMap<VeryCompactSizeClassMap>();
204 }
205 
206 TEST(SanitizerCommon, InternalSizeClassMap) {
207   TestSizeClassMap<InternalSizeClassMap>();
208 }
209 
210 TEST(SanitizerCommon, DenseSizeClassMap) {
211   TestSizeClassMap<DenseSizeClassMap>();
212 }
213 
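// Generic smoke test for a size class allocator: allocates chunks of various
// sizes, checks GetBlockBegin/PointerIsMine/GetSizeClass and the metadata
// round-trip, then frees everything and verifies that repeated cycles do not
// grow TotalMemoryUsed.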
214 template <class Allocator>
215 void TestSizeClassAllocator(uptr premapped_heap = 0) {
216   Allocator *a = new Allocator;
217   a->Init(kReleaseToOSIntervalNever, premapped_heap);
218   typename Allocator::AllocatorCache cache;
219   memset(&cache, 0, sizeof(cache));
220   cache.Init(0);
221 
222   static const uptr sizes[] = {
223     1, 16,  30, 40, 100, 1000, 10000,
224     50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000
225   };
226 
227   std::vector<void *> allocated;
228 
229   uptr last_total_allocated = 0;
230   for (int i = 0; i < 3; i++) {
231     // Allocate a bunch of chunks.
232     for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
233       uptr size = sizes[s];
234       if (!a->CanAllocate(size, 1)) continue;
235       // printf("s = %ld\n", size);
236       uptr n_iter = std::max((uptr)6, 4000000 / size);
237       // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
238       for (uptr i = 0; i < n_iter; i++) {
239         uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
240         char *x = (char*)cache.Allocate(a, class_id0);
241         x[0] = 0;
242         x[size - 1] = 0;
243         x[size / 2] = 0;
244         allocated.push_back(x);
245         CHECK_EQ(x, a->GetBlockBegin(x));
246         CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
247         CHECK(a->PointerIsMine(x));
248         CHECK(a->PointerIsMine(x + size - 1));
249         CHECK(a->PointerIsMine(x + size / 2));
250         CHECK_GE(a->GetActuallyAllocatedSize(x), size);
251         uptr class_id = a->GetSizeClass(x);
252         CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
253         uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
254         metadata[0] = reinterpret_cast<uptr>(x) + 1;
255         metadata[1] = 0xABCD;
256       }
257     }
258     // Deallocate all.
259     for (uptr i = 0; i < allocated.size(); i++) {
260       void *x = allocated[i];
261       uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
262       CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
263       CHECK_EQ(metadata[1], 0xABCD);
264       cache.Deallocate(a, a->GetSizeClass(x), x);
265     }
266     allocated.clear();
267     uptr total_allocated = a->TotalMemoryUsed();
268     if (last_total_allocated == 0)
269       last_total_allocated = total_allocated;
270     CHECK_EQ(last_total_allocated, total_allocated);
271   }
272 
273   // Check that GetBlockBegin never crashes.
274   for (uptr x = 0, step = kAddressSpaceSize / 100000;
275        x < kAddressSpaceSize - step; x += step)
276     if (a->PointerIsMine(reinterpret_cast<void *>(x)))
277       Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));
278 
279   a->TestOnlyUnmap();
280   delete a;
281 }
282 
283 #if SANITIZER_CAN_USE_ALLOCATOR64
284 
285 // Reserves 2 * kAllocatorSize bytes on construction so that a kAllocatorSize-
286 // aligned heap address is available, and releases the mapping on destruction.
287 class ScopedPremappedHeap {
288  public:
289   ScopedPremappedHeap() {
290     BasePtr = MmapNoReserveOrDie(2 * kAllocatorSize, "preallocated heap");
291     AlignedAddr = RoundUpTo(reinterpret_cast<uptr>(BasePtr), kAllocatorSize);
292   }
293 
294   ~ScopedPremappedHeap() { UnmapOrDie(BasePtr, kAllocatorSize); }
295 
296   uptr Addr() { return AlignedAddr; }
297 
298  private:
299   void *BasePtr;
300   uptr AlignedAddr;
301 };
302 
303 // These tests can fail on Windows if memory is somewhat full and lit happens
304 // to run them all at the same time. FIXME: Make them not flaky and reenable.
305 #if !SANITIZER_WINDOWS
306 TEST(SanitizerCommon, SizeClassAllocator64) {
307   TestSizeClassAllocator<Allocator64>();
308 }
309 
310 TEST(SanitizerCommon, SizeClassAllocator64Dynamic) {
311   TestSizeClassAllocator<Allocator64Dynamic>();
312 }
313 
314 #if !ALLOCATOR64_SMALL_SIZE
315 // Android (39-bit) and RISC-V (38-bit) have small address spaces, so mapping
316 // 2 * kAllocatorSize sometimes fails.
317 TEST(SanitizerCommon, SizeClassAllocator64DynamicPremapped) {
318   ScopedPremappedHeap h;
319   TestSizeClassAllocator<Allocator64Dynamic>(h.Addr());
320 }
321 
322 TEST(SanitizerCommon, SizeClassAllocator64Compact) {
323   TestSizeClassAllocator<Allocator64Compact>();
324 }
325 
326 TEST(SanitizerCommon, SizeClassAllocator64Dense) {
327   TestSizeClassAllocator<Allocator64Dense>();
328 }
329 #endif
330 
331 TEST(SanitizerCommon, SizeClassAllocator64VeryCompact) {
332   TestSizeClassAllocator<Allocator64VeryCompact>();
333 }
334 #endif
335 #endif
336 
337 TEST(SanitizerCommon, SizeClassAllocator32Compact) {
338   TestSizeClassAllocator<Allocator32Compact>();
339 }
340 
341 template <typename AddressSpaceViewTy>
342 struct AP32SeparateBatches {
343   static const uptr kSpaceBeg = 0;
344   static const u64 kSpaceSize = kAddressSpaceSize;
345   static const uptr kMetadataSize = 16;
346   typedef DefaultSizeClassMap SizeClassMap;
347   static const uptr kRegionSizeLog = ::kRegionSizeLog;
348   using AddressSpaceView = AddressSpaceViewTy;
349   typedef NoOpMapUnmapCallback MapUnmapCallback;
350   static const uptr kFlags =
351       SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
352 };
353 template <typename AddressSpaceView>
354 using Allocator32SeparateBatchesASVT =
355     SizeClassAllocator32<AP32SeparateBatches<AddressSpaceView>>;
356 using Allocator32SeparateBatches =
357     Allocator32SeparateBatchesASVT<LocalAddressSpaceView>;
358 
359 TEST(SanitizerCommon, SizeClassAllocator32SeparateBatches) {
360   TestSizeClassAllocator<Allocator32SeparateBatches>();
361 }
362 
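// Checks that GetMetaData returns a stable pointer for each chunk no matter
// how many times it is queried.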
363 template <class Allocator>
364 void SizeClassAllocatorMetadataStress(uptr premapped_heap = 0) {
365   Allocator *a = new Allocator;
366   a->Init(kReleaseToOSIntervalNever, premapped_heap);
367   typename Allocator::AllocatorCache cache;
368   memset(&cache, 0, sizeof(cache));
369   cache.Init(0);
370 
371   const uptr kNumAllocs = 1 << 13;
372   void *allocated[kNumAllocs];
373   void *meta[kNumAllocs];
374   for (uptr i = 0; i < kNumAllocs; i++) {
375     void *x = cache.Allocate(a, 1 + i % (Allocator::kNumClasses - 1));
376     allocated[i] = x;
377     meta[i] = a->GetMetaData(x);
378   }
379   // Get Metadata kNumAllocs^2 times.
380   for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
381     uptr idx = i % kNumAllocs;
382     void *m = a->GetMetaData(allocated[idx]);
383     EXPECT_EQ(m, meta[idx]);
384   }
385   for (uptr i = 0; i < kNumAllocs; i++) {
386     cache.Deallocate(a, 1 + i % (Allocator::kNumClasses - 1), allocated[i]);
387   }
388 
389   a->TestOnlyUnmap();
390   delete a;
391 }
392 
393 #if SANITIZER_CAN_USE_ALLOCATOR64
394 // These tests can fail on Windows if memory is somewhat full and lit happens
395 // to run them all at the same time. FIXME: Make them not flaky and reenable.
396 #if !SANITIZER_WINDOWS
397 TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
398   SizeClassAllocatorMetadataStress<Allocator64>();
399 }
400 
401 TEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) {
402   SizeClassAllocatorMetadataStress<Allocator64Dynamic>();
403 }
404 
405 #if !ALLOCATOR64_SMALL_SIZE
406 TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedMetadataStress) {
407   ScopedPremappedHeap h;
408   SizeClassAllocatorMetadataStress<Allocator64Dynamic>(h.Addr());
409 }
410 
411 TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
412   SizeClassAllocatorMetadataStress<Allocator64Compact>();
413 }
414 #endif
415 
416 #endif
417 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
418 TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
419   SizeClassAllocatorMetadataStress<Allocator32Compact>();
420 }
421 
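// Allocates the largest size class repeatedly (covering roughly TotalSize
// bytes) and checks that GetBlockBegin maps each chunk back to its own start.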
422 template <class Allocator>
423 void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize,
424                                            uptr premapped_heap = 0) {
425   Allocator *a = new Allocator;
426   a->Init(kReleaseToOSIntervalNever, premapped_heap);
427   typename Allocator::AllocatorCache cache;
428   memset(&cache, 0, sizeof(cache));
429   cache.Init(0);
430 
431   uptr max_size_class = Allocator::SizeClassMapT::kLargestClassID;
432   uptr size = Allocator::SizeClassMapT::Size(max_size_class);
433   // Make sure we correctly compute GetBlockBegin() w/o overflow.
434   for (size_t i = 0; i <= TotalSize / size; i++) {
435     void *x = cache.Allocate(a, max_size_class);
436     void *beg = a->GetBlockBegin(x);
437     // if ((i & (i - 1)) == 0)
438     //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
439     EXPECT_EQ(x, beg);
440   }
441 
442   a->TestOnlyUnmap();
443   delete a;
444 }
445 
446 #if SANITIZER_CAN_USE_ALLOCATOR64
447 // These tests can fail on Windows if memory is somewhat full and lit happens
448 // to run them all at the same time. FIXME: Make them not flaky and reenable.
449 #if !SANITIZER_WINDOWS
450 TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
451   SizeClassAllocatorGetBlockBeginStress<Allocator64>(
452       1ULL << (SANITIZER_ANDROID ? 31 : 33));
453 }
454 TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {
455   SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
456       1ULL << (SANITIZER_ANDROID ? 31 : 33));
457 }
458 #if !ALLOCATOR64_SMALL_SIZE
459 TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedGetBlockBegin) {
460   ScopedPremappedHeap h;
461   SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
462       1ULL << (SANITIZER_ANDROID ? 31 : 33), h.Addr());
463 }
464 TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
465   SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
466 }
467 #endif
468 TEST(SanitizerCommon, SizeClassAllocator64VeryCompactGetBlockBegin) {
469   // The very compact size class map does not have > 4GB per size class.
470   SizeClassAllocatorGetBlockBeginStress<Allocator64VeryCompact>(1ULL << 31);
471 }
472 TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
473   SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>(1ULL << 33);
474 }
475 #endif
476 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
477 
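// Counts OnMap/OnMapSecondary/OnUnmap callbacks so the tests below can assert
// how many mappings a particular allocator configuration performs.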
478 struct TestMapUnmapCallback {
479   static int map_count, map_secondary_count, unmap_count;
480   void OnMap(uptr p, uptr size) const { map_count++; }
481   void OnMapSecondary(uptr p, uptr size) const { map_secondary_count++; }
482   void OnUnmap(uptr p, uptr size) const { unmap_count++; }
483 
484   static void Reset() { map_count = map_secondary_count = unmap_count = 0; }
485 };
486 int TestMapUnmapCallback::map_count;
487 int TestMapUnmapCallback::map_secondary_count;
488 int TestMapUnmapCallback::unmap_count;
489 
490 #if SANITIZER_CAN_USE_ALLOCATOR64
491 // These tests can fail on Windows if memory is somewhat full and lit happens
492 // to run them all at the same time. FIXME: Make them not flaky and reenable.
493 #if !SANITIZER_WINDOWS
494 
495 template <typename AddressSpaceViewTy = LocalAddressSpaceView>
496 struct AP64WithCallback {
497   static const uptr kSpaceBeg = kAllocatorSpace;
498   static const uptr kSpaceSize = kAllocatorSize;
499   static const uptr kMetadataSize = 16;
500   typedef ::SizeClassMap SizeClassMap;
501   typedef TestMapUnmapCallback MapUnmapCallback;
502   static const uptr kFlags = 0;
503   using AddressSpaceView = AddressSpaceViewTy;
504 };
505 
506 TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
507   TestMapUnmapCallback::Reset();
508   typedef SizeClassAllocator64<AP64WithCallback<>> Allocator64WithCallBack;
509   Allocator64WithCallBack *a = new Allocator64WithCallBack;
510   a->Init(kReleaseToOSIntervalNever);
511   EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
512   EXPECT_EQ(TestMapUnmapCallback::map_secondary_count, 0);
513   typename Allocator64WithCallBack::AllocatorCache cache;
514   memset(&cache, 0, sizeof(cache));
515   cache.Init(0);
516   AllocatorStats stats;
517   stats.Init();
518   const size_t kNumChunks = 128;
519   uint32_t chunks[kNumChunks];
520   a->GetFromAllocator(&stats, 30, chunks, kNumChunks);
521   // State + alloc + metadata + freearray.
522   EXPECT_EQ(TestMapUnmapCallback::map_count, 4);
523   EXPECT_EQ(TestMapUnmapCallback::map_secondary_count, 0);
524   a->TestOnlyUnmap();
525   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
526   delete a;
527 }
528 #endif
529 #endif
530 
531 template <typename AddressSpaceViewTy = LocalAddressSpaceView>
532 struct AP32WithCallback {
533   static const uptr kSpaceBeg = 0;
534   static const u64 kSpaceSize = kAddressSpaceSize;
535   static const uptr kMetadataSize = 16;
536   typedef CompactSizeClassMap SizeClassMap;
537   static const uptr kRegionSizeLog = ::kRegionSizeLog;
538   using AddressSpaceView = AddressSpaceViewTy;
539   typedef TestMapUnmapCallback MapUnmapCallback;
540   static const uptr kFlags = 0;
541 };
542 
543 TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
544   TestMapUnmapCallback::Reset();
545   typedef SizeClassAllocator32<AP32WithCallback<>> Allocator32WithCallBack;
546   Allocator32WithCallBack *a = new Allocator32WithCallBack;
547   a->Init(kReleaseToOSIntervalNever);
548   EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
549   EXPECT_EQ(TestMapUnmapCallback::map_secondary_count, 0);
550   Allocator32WithCallBack::AllocatorCache cache;
551   memset(&cache, 0, sizeof(cache));
552   cache.Init(0);
553   AllocatorStats stats;
554   stats.Init();
555   a->AllocateBatch(&stats, &cache, 32);
556   EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
557   EXPECT_EQ(TestMapUnmapCallback::map_secondary_count, 0);
558   a->TestOnlyUnmap();
559   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
560   delete a;
561 }
562 
563 TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
564   TestMapUnmapCallback::Reset();
565   LargeMmapAllocator<TestMapUnmapCallback> a;
566   a.Init();
567   AllocatorStats stats;
568   stats.Init();
569   void *x = a.Allocate(&stats, 1 << 20, 1);
570   EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
571   EXPECT_EQ(TestMapUnmapCallback::map_secondary_count, 1);
572   a.Deallocate(&stats, x);
573   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
574 }
575 
576 // Don't test OOM conditions on Win64 because it causes other tests on the same
577 // machine to OOM.
578 #if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64
579 TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
580   Allocator64 a;
581   a.Init(kReleaseToOSIntervalNever);
582   Allocator64::AllocatorCache cache;
583   memset(&cache, 0, sizeof(cache));
584   cache.Init(0);
585   AllocatorStats stats;
586   stats.Init();
587 
588   const size_t kNumChunks = 128;
589   uint32_t chunks[kNumChunks];
590   bool allocation_failed = false;
591   for (int i = 0; i < 1000000; i++) {
592     uptr class_id = a.kNumClasses - 1;
593     if (!a.GetFromAllocator(&stats, class_id, chunks, kNumChunks)) {
594       allocation_failed = true;
595       break;
596     }
597   }
598   EXPECT_EQ(allocation_failed, true);
599 
600   a.TestOnlyUnmap();
601 }
602 #endif
603 
604 TEST(SanitizerCommon, LargeMmapAllocator) {
605   LargeMmapAllocator<NoOpMapUnmapCallback> a;
606   a.Init();
607   AllocatorStats stats;
608   stats.Init();
609 
610   static const int kNumAllocs = 1000;
611   char *allocated[kNumAllocs];
612   static const uptr size = 4000;
613   // Allocate some.
614   for (int i = 0; i < kNumAllocs; i++) {
615     allocated[i] = (char *)a.Allocate(&stats, size, 1);
616     CHECK(a.PointerIsMine(allocated[i]));
617   }
618   // Deallocate all.
619   CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
620   for (int i = 0; i < kNumAllocs; i++) {
621     char *p = allocated[i];
622     CHECK(a.PointerIsMine(p));
623     a.Deallocate(&stats, p);
624   }
625   // Check that none are left.
626   CHECK_EQ(a.TotalMemoryUsed(), 0);
627 
628   // Allocate some more, also add metadata.
629   for (int i = 0; i < kNumAllocs; i++) {
630     char *x = (char *)a.Allocate(&stats, size, 1);
631     CHECK_GE(a.GetActuallyAllocatedSize(x), size);
632     uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
633     *meta = i;
634     allocated[i] = x;
635   }
636   for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
637     char *p = allocated[i % kNumAllocs];
638     CHECK(a.PointerIsMine(p));
639     CHECK(a.PointerIsMine(p + 2000));
640   }
641   CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
642   // Deallocate all in reverse order.
643   for (int i = 0; i < kNumAllocs; i++) {
644     int idx = kNumAllocs - i - 1;
645     char *p = allocated[idx];
646     uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
647     CHECK_EQ(*meta, idx);
648     CHECK(a.PointerIsMine(p));
649     a.Deallocate(&stats, p);
650   }
651   CHECK_EQ(a.TotalMemoryUsed(), 0);
652 
653   // Test alignments, with up to 256MB alignment on x64 non-Windows machines.
654   // Windows doesn't overcommit, and many machines do not have 25.6GB of swap.
655   uptr max_alignment =
656       (SANITIZER_WORDSIZE == 64 && !SANITIZER_WINDOWS) ? (1 << 28) : (1 << 24);
657   for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
658     const uptr kNumAlignedAllocs = 100;
659     for (uptr i = 0; i < kNumAlignedAllocs; i++) {
660       uptr size = ((i % 10) + 1) * 4096;
661       char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
662       CHECK_EQ(p, a.GetBlockBegin(p));
663       CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
664       CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
665       CHECK_EQ(0, (uptr)allocated[i] % alignment);
666       p[0] = p[size - 1] = 0;
667     }
668     for (uptr i = 0; i < kNumAlignedAllocs; i++) {
669       a.Deallocate(&stats, allocated[i]);
670     }
671   }
672 
673   // Regression test for boundary condition in GetBlockBegin().
674   uptr page_size = GetPageSizeCached();
675   char *p = (char *)a.Allocate(&stats, page_size, 1);
676   CHECK_EQ(p, a.GetBlockBegin(p));
677   CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
678   CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
679   a.Deallocate(&stats, p);
680 }
681 
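// Exercises the primary+secondary CombinedAllocator through its allocator
// cache: oversized requests must fail cleanly, per-chunk metadata must
// round-trip, and ForEachChunk must report every live chunk.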
682 template <class PrimaryAllocator>
683 void TestCombinedAllocator(uptr premapped_heap = 0) {
684   typedef CombinedAllocator<PrimaryAllocator> Allocator;
685   Allocator *a = new Allocator;
686   a->Init(kReleaseToOSIntervalNever, premapped_heap);
687   std::mt19937 r;
688 
689   typename Allocator::AllocatorCache cache;
690   memset(&cache, 0, sizeof(cache));
691   a->InitCache(&cache);
692 
693   EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
694   EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
695   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
696   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
697   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
698   EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
699 
700   const uptr kNumAllocs = 100000;
701   const uptr kNumIter = 10;
702   for (uptr iter = 0; iter < kNumIter; iter++) {
703     std::vector<void*> allocated;
704     for (uptr i = 0; i < kNumAllocs; i++) {
705       uptr size = (i % (1 << 14)) + 1;
706       if ((i % 1024) == 0)
707         size = 1 << (10 + (i % 14));
708       void *x = a->Allocate(&cache, size, 1);
709       uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
710       CHECK_EQ(*meta, 0);
711       *meta = size;
712       allocated.push_back(x);
713     }
714 
715     std::shuffle(allocated.begin(), allocated.end(), r);
716 
717     // Test ForEachChunk(...)
718     {
719       std::set<void *> reported_chunks;
720       auto cb = [](uptr chunk, void *arg) {
721         auto reported_chunks_ptr = reinterpret_cast<std::set<void *> *>(arg);
722         auto pair =
723             reported_chunks_ptr->insert(reinterpret_cast<void *>(chunk));
724         // Check chunk is never reported more than once.
725         ASSERT_TRUE(pair.second);
726       };
727       a->ForEachChunk(cb, reinterpret_cast<void *>(&reported_chunks));
728       for (const auto &allocated_ptr : allocated) {
729         ASSERT_NE(reported_chunks.find(allocated_ptr), reported_chunks.end());
730       }
731     }
732 
733     for (uptr i = 0; i < kNumAllocs; i++) {
734       void *x = allocated[i];
735       uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
736       CHECK_NE(*meta, 0);
737       CHECK(a->PointerIsMine(x));
738       *meta = 0;
739       a->Deallocate(&cache, x);
740     }
741     allocated.clear();
742     a->SwallowCache(&cache);
743   }
744   a->DestroyCache(&cache);
745   a->TestOnlyUnmap();
746 }
747 
748 #if SANITIZER_CAN_USE_ALLOCATOR64
749 TEST(SanitizerCommon, CombinedAllocator64) {
750   TestCombinedAllocator<Allocator64>();
751 }
752 
753 TEST(SanitizerCommon, CombinedAllocator64Dynamic) {
754   TestCombinedAllocator<Allocator64Dynamic>();
755 }
756 
757 #if !ALLOCATOR64_SMALL_SIZE
758 #if !SANITIZER_WINDOWS
759 // Windows fails to map 1TB, so disable this test.
760 TEST(SanitizerCommon, CombinedAllocator64DynamicPremapped) {
761   ScopedPremappedHeap h;
762   TestCombinedAllocator<Allocator64Dynamic>(h.Addr());
763 }
764 #endif
765 
766 TEST(SanitizerCommon, CombinedAllocator64Compact) {
767   TestCombinedAllocator<Allocator64Compact>();
768 }
769 #endif
770 
771 TEST(SanitizerCommon, CombinedAllocator64VeryCompact) {
772   TestCombinedAllocator<Allocator64VeryCompact>();
773 }
774 #endif
775 
776 TEST(SanitizerCommon, SKIP_ON_SOLARIS_SPARCV9(CombinedAllocator32Compact)) {
777   TestCombinedAllocator<Allocator32Compact>();
778 }
779 
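// Verifies that allocating and freeing through the per-thread cache, followed
// by Drain, leaves TotalMemoryUsed unchanged from one iteration to the next.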
780 template <class Allocator>
781 void TestSizeClassAllocatorLocalCache(uptr premapped_heap = 0) {
782   using AllocatorCache = typename Allocator::AllocatorCache;
783   AllocatorCache cache;
784   Allocator *a = new Allocator();
785 
786   a->Init(kReleaseToOSIntervalNever, premapped_heap);
787   memset(&cache, 0, sizeof(cache));
788   cache.Init(0);
789 
790   const uptr kNumAllocs = 10000;
791   const int kNumIter = 100;
792   uptr saved_total = 0;
793   for (int class_id = 1; class_id <= 5; class_id++) {
794     for (int it = 0; it < kNumIter; it++) {
795       void *allocated[kNumAllocs];
796       for (uptr i = 0; i < kNumAllocs; i++) {
797         allocated[i] = cache.Allocate(a, class_id);
798       }
799       for (uptr i = 0; i < kNumAllocs; i++) {
800         cache.Deallocate(a, class_id, allocated[i]);
801       }
802       cache.Drain(a);
803       uptr total_allocated = a->TotalMemoryUsed();
804       if (it)
805         CHECK_EQ(saved_total, total_allocated);
806       saved_total = total_allocated;
807     }
808   }
809 
810   a->TestOnlyUnmap();
811   delete a;
812 }
813 
814 #if SANITIZER_CAN_USE_ALLOCATOR64
815 // These tests can fail on Windows if memory is somewhat full and lit happens
816 // to run them all at the same time. FIXME: Make them not flaky and reenable.
817 #if !SANITIZER_WINDOWS
818 TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
819   TestSizeClassAllocatorLocalCache<Allocator64>();
820 }
821 
822 TEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) {
823   TestSizeClassAllocatorLocalCache<Allocator64Dynamic>();
824 }
825 
826 #if !ALLOCATOR64_SMALL_SIZE
827 TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedLocalCache) {
828   ScopedPremappedHeap h;
829   TestSizeClassAllocatorLocalCache<Allocator64Dynamic>(h.Addr());
830 }
831 
832 TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
833   TestSizeClassAllocatorLocalCache<Allocator64Compact>();
834 }
835 #endif
836 TEST(SanitizerCommon, SizeClassAllocator64VeryCompactLocalCache) {
837   TestSizeClassAllocatorLocalCache<Allocator64VeryCompact>();
838 }
839 #endif
840 #endif
841 
842 TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
843   TestSizeClassAllocatorLocalCache<Allocator32Compact>();
844 }
845 
846 #if SANITIZER_CAN_USE_ALLOCATOR64
847 typedef Allocator64::AllocatorCache AllocatorCache;
848 static AllocatorCache static_allocator_cache;
849 
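// Each worker thread allocates one chunk from a global cache and drains the
// cache back; AllocatorLeakTest then checks that TotalMemoryUsed stays
// constant across 100 such threads, i.e. nothing leaks per thread.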
850 void *AllocatorLeakTestWorker(void *arg) {
851   typedef AllocatorCache::Allocator Allocator;
852   Allocator *a = (Allocator*)(arg);
853   static_allocator_cache.Allocate(a, 10);
854   static_allocator_cache.Drain(a);
855   return 0;
856 }
857 
858 TEST(SanitizerCommon, AllocatorLeakTest) {
859   typedef AllocatorCache::Allocator Allocator;
860   Allocator a;
861   a.Init(kReleaseToOSIntervalNever);
862   uptr total_used_memory = 0;
863   for (int i = 0; i < 100; i++) {
864     pthread_t t;
865     PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a);
866     PTHREAD_JOIN(t, 0);
867     if (i == 0)
868       total_used_memory = a.TotalMemoryUsed();
869     EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
870   }
871 
872   a.TestOnlyUnmap();
873 }
874 
875 // Struct which is allocated to pass info to new threads.  The new thread frees
876 // it.
877 struct NewThreadParams {
878   AllocatorCache *thread_cache;
879   AllocatorCache::Allocator *allocator;
880   uptr class_id;
881 };
882 
883 // Called in a new thread.  Just frees its argument.
884 static void *DeallocNewThreadWorker(void *arg) {
885   NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
886   params->thread_cache->Deallocate(params->allocator, params->class_id, params);
887   return NULL;
888 }
889 
890 // The allocator cache is supposed to be POD and zero initialized.  We should be
891 // able to call Deallocate on a zeroed cache, and it will self-initialize.
892 TEST(Allocator, AllocatorCacheDeallocNewThread) {
893   AllocatorCache::Allocator allocator;
894   allocator.Init(kReleaseToOSIntervalNever);
895   AllocatorCache main_cache;
896   AllocatorCache child_cache;
897   memset(&main_cache, 0, sizeof(main_cache));
898   memset(&child_cache, 0, sizeof(child_cache));
899 
900   uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
901   NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
902       main_cache.Allocate(&allocator, class_id));
903   params->thread_cache = &child_cache;
904   params->allocator = &allocator;
905   params->class_id = class_id;
906   pthread_t t;
907   PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params);
908   PTHREAD_JOIN(t, 0);
909 
910   allocator.TestOnlyUnmap();
911 }
912 #endif
913 
914 TEST(Allocator, Basic) {
915   char *p = (char*)InternalAlloc(10);
916   EXPECT_NE(p, (char*)0);
917   char *p2 = (char*)InternalAlloc(20);
918   EXPECT_NE(p2, (char*)0);
919   EXPECT_NE(p2, p);
920   InternalFree(p);
921   InternalFree(p2);
922 }
923 
924 TEST(Allocator, Stress) {
925   const int kCount = 1000;
926   char *ptrs[kCount];
927   unsigned rnd = 42;
928   for (int i = 0; i < kCount; i++) {
929     uptr sz = my_rand_r(&rnd) % 1000;
930     char *p = (char*)InternalAlloc(sz);
931     EXPECT_NE(p, (char*)0);
932     ptrs[i] = p;
933   }
934   for (int i = 0; i < kCount; i++) {
935     InternalFree(ptrs[i]);
936   }
937 }
938 
939 TEST(Allocator, LargeAlloc) {
940   void *p = InternalAlloc(10 << 20);
941   InternalFree(p);
942 }
943 
944 TEST(Allocator, ScopedBuffer) {
945   const int kSize = 512;
946   {
947     InternalMmapVector<int> int_buf(kSize);
948     EXPECT_EQ((uptr)kSize, int_buf.size());
949   }
950   InternalMmapVector<char> char_buf(kSize);
951   EXPECT_EQ((uptr)kSize, char_buf.size());
952   internal_memset(char_buf.data(), 'c', kSize);
953   for (int i = 0; i < kSize; i++) {
954     EXPECT_EQ('c', char_buf[i]);
955   }
956 }
957 
958 void IterationTestCallback(uptr chunk, void *arg) {
959   reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
960 }
961 
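// Checks that ForEachChunk (called under ForceLock) reports every chunk that
// is currently allocated.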
962 template <class Allocator>
963 void TestSizeClassAllocatorIteration(uptr premapped_heap = 0) {
964   Allocator *a = new Allocator;
965   a->Init(kReleaseToOSIntervalNever, premapped_heap);
966   typename Allocator::AllocatorCache cache;
967   memset(&cache, 0, sizeof(cache));
968   cache.Init(0);
969 
970   static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
971     50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};
972 
973   std::vector<void *> allocated;
974 
975   // Allocate a bunch of chunks.
976   for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
977     uptr size = sizes[s];
978     if (!a->CanAllocate(size, 1)) continue;
979     // printf("s = %ld\n", size);
980     uptr n_iter = std::max((uptr)6, 80000 / size);
981     // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
982     for (uptr j = 0; j < n_iter; j++) {
983       uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
984       void *x = cache.Allocate(a, class_id0);
985       allocated.push_back(x);
986     }
987   }
988 
989   std::set<uptr> reported_chunks;
990   a->ForceLock();
991   a->ForEachChunk(IterationTestCallback, &reported_chunks);
992   a->ForceUnlock();
993 
994   for (uptr i = 0; i < allocated.size(); i++) {
995     // Don't use EXPECT_NE. Reporting the first mismatch is enough.
996     ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
997               reported_chunks.end());
998   }
999 
1000   a->TestOnlyUnmap();
1001   delete a;
1002 }
1003 
1004 #if SANITIZER_CAN_USE_ALLOCATOR64
1005 // These tests can fail on Windows if memory is somewhat full and lit happens
1006 // to run them all at the same time. FIXME: Make them not flaky and reenable.
1007 #if !SANITIZER_WINDOWS
1008 TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
1009   TestSizeClassAllocatorIteration<Allocator64>();
1010 }
1011 TEST(SanitizerCommon, SizeClassAllocator64DynamicIteration) {
1012   TestSizeClassAllocatorIteration<Allocator64Dynamic>();
1013 }
1014 #if !ALLOCATOR64_SMALL_SIZE
1015 TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedIteration) {
1016   ScopedPremappedHeap h;
1017   TestSizeClassAllocatorIteration<Allocator64Dynamic>(h.Addr());
1018 }
1019 #endif
1020 #endif
1021 #endif
1022 
1023 TEST(SanitizerCommon, SKIP_ON_SOLARIS_SPARCV9(SizeClassAllocator32Iteration)) {
1024   TestSizeClassAllocatorIteration<Allocator32Compact>();
1025 }
1026 
1027 TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
1028   LargeMmapAllocator<NoOpMapUnmapCallback> a;
1029   a.Init();
1030   AllocatorStats stats;
1031   stats.Init();
1032 
1033   static const uptr kNumAllocs = 1000;
1034   char *allocated[kNumAllocs];
1035   static const uptr size = 40;
1036   // Allocate some.
1037   for (uptr i = 0; i < kNumAllocs; i++)
1038     allocated[i] = (char *)a.Allocate(&stats, size, 1);
1039 
1040   std::set<uptr> reported_chunks;
1041   a.ForceLock();
1042   a.ForEachChunk(IterationTestCallback, &reported_chunks);
1043   a.ForceUnlock();
1044 
1045   for (uptr i = 0; i < kNumAllocs; i++) {
1046     // Don't use EXPECT_NE. Reporting the first mismatch is enough.
1047     ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
1048               reported_chunks.end());
1049   }
1050   for (uptr i = 0; i < kNumAllocs; i++)
1051     a.Deallocate(&stats, allocated[i]);
1052 }
1053 
1054 TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
1055   LargeMmapAllocator<NoOpMapUnmapCallback> a;
1056   a.Init();
1057   AllocatorStats stats;
1058   stats.Init();
1059 
1060   static const uptr kNumAllocs = 1024;
1061   static const uptr kNumExpectedFalseLookups = 10000000;
1062   char *allocated[kNumAllocs];
1063   static const uptr size = 4096;
1064   // Allocate some.
1065   for (uptr i = 0; i < kNumAllocs; i++) {
1066     allocated[i] = (char *)a.Allocate(&stats, size, 1);
1067   }
1068 
1069   a.ForceLock();
1070   for (uptr i = 0; i < kNumAllocs  * kNumAllocs; i++) {
1071     // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
1072     char *p1 = allocated[i % kNumAllocs];
1073     EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
1074     EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
1075     EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
1076     EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
1077   }
1078 
1079   for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
1080     void *p = reinterpret_cast<void *>(i % 1024);
1081     EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
1082     p = reinterpret_cast<void *>(~0L - (i % 1024));
1083     EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
1084   }
1085   a.ForceUnlock();
1086 
1087   for (uptr i = 0; i < kNumAllocs; i++)
1088     a.Deallocate(&stats, allocated[i]);
1089 }
1090 
1091 
1092 // Don't test OOM conditions on Win64 because it causes other tests on the same
1093 // machine to OOM.
1094 #if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64
1095 typedef __sanitizer::SizeClassMap<2, 22, 22, 34, 128, 16> SpecialSizeClassMap;
1096 template <typename AddressSpaceViewTy = LocalAddressSpaceView>
1097 struct AP64_SpecialSizeClassMap {
1098   static const uptr kSpaceBeg = kAllocatorSpace;
1099   static const uptr kSpaceSize = kAllocatorSize;
1100   static const uptr kMetadataSize = 0;
1101   typedef SpecialSizeClassMap SizeClassMap;
1102   typedef NoOpMapUnmapCallback MapUnmapCallback;
1103   static const uptr kFlags = 0;
1104   using AddressSpaceView = AddressSpaceViewTy;
1105 };
1106 
1107 // Regression test for out-of-memory condition in PopulateFreeList().
1108 TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
1109   // In a world where regions are small and chunks are huge...
1110   typedef SizeClassAllocator64<AP64_SpecialSizeClassMap<>> SpecialAllocator64;
1111   const uptr kRegionSize =
1112       kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
1113   SpecialAllocator64 *a = new SpecialAllocator64;
1114   a->Init(kReleaseToOSIntervalNever);
1115   SpecialAllocator64::AllocatorCache cache;
1116   memset(&cache, 0, sizeof(cache));
1117   cache.Init(0);
1118 
1119   // ...one man is on a mission to overflow a region with a series of
1120   // successive allocations.
1121 
1122   const uptr kClassID = ALLOCATOR64_SMALL_SIZE ? 18 : 24;
1123   const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
1124   ASSERT_LT(2 * kAllocationSize, kRegionSize);
1125   ASSERT_GT(3 * kAllocationSize, kRegionSize);
1126   EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
1127   EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
1128   EXPECT_EQ(cache.Allocate(a, kClassID), nullptr);
1129 
1130   const uptr Class2 = ALLOCATOR64_SMALL_SIZE ? 15 : 21;
1131   const uptr Size2 = SpecialSizeClassMap::Size(Class2);
1132   ASSERT_EQ(Size2 * 8, kRegionSize);
1133   char *p[7];
1134   for (int i = 0; i < 7; i++) {
1135     p[i] = (char*)cache.Allocate(a, Class2);
1136     EXPECT_NE(p[i], nullptr);
1137     fprintf(stderr, "p[%d] %p s = %lx\n", i, (void*)p[i], Size2);
1138     p[i][Size2 - 1] = 42;
1139     if (i) ASSERT_LT(p[i - 1], p[i]);
1140   }
1141   EXPECT_EQ(cache.Allocate(a, Class2), nullptr);
1142   cache.Deallocate(a, Class2, p[0]);
1143   cache.Drain(a);
1144   ASSERT_EQ(p[6][Size2 - 1], 42);
1145   a->TestOnlyUnmap();
1146   delete a;
1147 }
1148 
1149 #endif
1150 
1151 #if SANITIZER_CAN_USE_ALLOCATOR64
1152 
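// Test doubles for the memory-mapper interface used by PackedCounterArray:
// NoMemoryMapper only records the requested buffer size, while
// RedZoneMemoryMapper hands out a single zeroed page surrounded by PROT_NONE
// guard pages so out-of-bounds counter accesses crash immediately.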
1153 class NoMemoryMapper {
1154  public:
1155   uptr last_request_buffer_size = 0;
1156 
1157   u64 *MapPackedCounterArrayBuffer(uptr buffer_size) {
1158     last_request_buffer_size = buffer_size * sizeof(u64);
1159     return nullptr;
1160   }
1161 };
1162 
1163 class RedZoneMemoryMapper {
1164  public:
1165   RedZoneMemoryMapper() {
1166     const auto page_size = GetPageSize();
1167     buffer = MmapOrDie(3ULL * page_size, "");
1168     MprotectNoAccess(reinterpret_cast<uptr>(buffer), page_size);
1169     MprotectNoAccess(reinterpret_cast<uptr>(buffer) + page_size * 2, page_size);
1170   }
1171   ~RedZoneMemoryMapper() { UnmapOrDie(buffer, 3 * GetPageSize()); }
1172 
1173   u64 *MapPackedCounterArrayBuffer(uptr buffer_size) {
1174     buffer_size *= sizeof(u64);
1175     const auto page_size = GetPageSize();
1176     CHECK_EQ(buffer_size, page_size);
1177     u64 *p =
1178         reinterpret_cast<u64 *>(reinterpret_cast<uptr>(buffer) + page_size);
1179     memset(p, 0, page_size);
1180     return p;
1181   }
1182 
1183  private:
1184   void *buffer;
1185 };
1186 
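// A worked example of the packing checked below: a per-counter maximum of 4
// (i == 2) needs 3 bits, which is rounded up to 4 bits, so 64 counters occupy
// 64 * 4 bits = 32 bytes, i.e. 8 * RoundUpToPowerOfTwo(2 + 1).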
1187 TEST(SanitizerCommon, SizeClassAllocator64PackedCounterArray) {
1188   NoMemoryMapper no_memory_mapper;
1189   for (int i = 0; i < 64; i++) {
1190     // Various valid counter max values packed into one word.
1191     Allocator64::PackedCounterArray counters_2n(1, 1ULL << i,
1192                                                 &no_memory_mapper);
1193     EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);
1194 
1195     // Check the "all bits set" values too.
1196     Allocator64::PackedCounterArray counters_2n1_1(1, ~0ULL >> i,
1197                                                    &no_memory_mapper);
1198     EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);
1199 
1200     // Verify the packing ratio; each counter is expected to be packed into
1201     // the closest power-of-two number of bits.
1202     Allocator64::PackedCounterArray counters(64, 1ULL << i, &no_memory_mapper);
1203     EXPECT_EQ(8ULL * RoundUpToPowerOfTwo(i + 1),
1204               no_memory_mapper.last_request_buffer_size);
1205   }
1206 
1207   RedZoneMemoryMapper memory_mapper;
1208   // Go through 1, 2, 4, 8, .. 64 bits per counter.
1209   for (int i = 0; i < 7; i++) {
1210     // Make sure counters request one memory page for the buffer.
1211     const u64 kNumCounters = (GetPageSize() / 8) * (64 >> i);
1212     Allocator64::PackedCounterArray counters(
1213         kNumCounters, 1ULL << ((1 << i) - 1), &memory_mapper);
1214     counters.Inc(0);
1215     for (u64 c = 1; c < kNumCounters - 1; c++) {
1216       ASSERT_EQ(0ULL, counters.Get(c));
1217       counters.Inc(c);
1218       ASSERT_EQ(1ULL, counters.Get(c - 1));
1219     }
1220     ASSERT_EQ(0ULL, counters.Get(kNumCounters - 1));
1221     counters.Inc(kNumCounters - 1);
1222 
1223     if (i > 0) {
1224       counters.IncRange(0, kNumCounters - 1);
1225       for (u64 c = 0; c < kNumCounters; c++)
1226         ASSERT_EQ(2ULL, counters.Get(c));
1227     }
1228   }
1229 }
1230 
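// Converts ReleasePageRangeToOS callbacks back into a '.'/'x' string (kept
// page / released page) so the FreePagesRangeTracker test below can compare
// its output against the input pattern.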
1231 class RangeRecorder {
1232  public:
1233   std::string reported_pages;
1234 
1235   RangeRecorder()
1236       : page_size_scaled_log(
1237             Log2(GetPageSizeCached() >> Allocator64::kCompactPtrScale)),
1238         last_page_reported(0) {}
1239 
1240   void ReleasePageRangeToOS(u32 class_id, u32 from, u32 to) {
1241     from >>= page_size_scaled_log;
1242     to >>= page_size_scaled_log;
1243     ASSERT_LT(from, to);
1244     if (!reported_pages.empty())
1245       ASSERT_LT(last_page_reported, from);
1246     reported_pages.append(from - last_page_reported, '.');
1247     reported_pages.append(to - from, 'x');
1248     last_page_reported = to;
1249   }
1250 
1251  private:
1252   const uptr page_size_scaled_log;
1253   u32 last_page_reported;
1254 };
1255 
1256 TEST(SanitizerCommon, SizeClassAllocator64FreePagesRangeTracker) {
1257   typedef Allocator64::FreePagesRangeTracker<RangeRecorder> RangeTracker;
1258 
1259   // 'x' denotes a page to be released, '.' denotes a page to be kept around.
1260   const char* test_cases[] = {
1261       "",
1262       ".",
1263       "x",
1264       "........",
1265       "xxxxxxxxxxx",
1266       "..............xxxxx",
1267       "xxxxxxxxxxxxxxxxxx.....",
1268       "......xxxxxxxx........",
1269       "xxx..........xxxxxxxxxxxxxxx",
1270       "......xxxx....xxxx........",
1271       "xxx..........xxxxxxxx....xxxxxxx",
1272       "x.x.x.x.x.x.x.x.x.x.x.x.",
1273       ".x.x.x.x.x.x.x.x.x.x.x.x",
1274       ".x.x.x.x.x.x.x.x.x.x.x.x.",
1275       "x.x.x.x.x.x.x.x.x.x.x.x.x",
1276   };
1277 
1278   for (auto test_case : test_cases) {
1279     RangeRecorder range_recorder;
1280     RangeTracker tracker(&range_recorder, 1);
1281     for (int i = 0; test_case[i] != 0; i++)
1282       tracker.NextPage(test_case[i] == 'x');
1283     tracker.Done();
1284     // Strip trailing '.'-pages before comparing the results as they are not
1285     // going to be reported to range_recorder anyway.
1286     const char* last_x = strrchr(test_case, 'x');
1287     std::string expected(
1288         test_case,
1289         last_x == nullptr ? 0 : (last_x - test_case + 1));
1290     EXPECT_STREQ(expected.c_str(), range_recorder.reported_pages.c_str());
1291   }
1292 }
1293 
1294 class ReleasedPagesTrackingMemoryMapper {
1295  public:
1296   std::set<u32> reported_pages;
1297   std::vector<u64> buffer;
1298 
1299   u64 *MapPackedCounterArrayBuffer(uptr buffer_size) {
1300     reported_pages.clear();
1301     buffer.assign(buffer_size, 0);
1302     return buffer.data();
1303   }
1304   void ReleasePageRangeToOS(u32 class_id, u32 from, u32 to) {
1305     uptr page_size_scaled =
1306         GetPageSizeCached() >> Allocator64::kCompactPtrScale;
1307     for (u32 i = from; i < to; i += page_size_scaled)
1308       reported_pages.insert(i);
1309   }
1310 };
1311 
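// For every size class: builds a randomized free list, runs
// ReleaseFreeMemoryToOS against the tracking mapper above, and then
// cross-checks the reported pages: no page touched by a live chunk may be
// released, and every page fully covered by free chunks must be.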
1312 template <class Allocator>
1313 void TestReleaseFreeMemoryToOS() {
1314   ReleasedPagesTrackingMemoryMapper memory_mapper;
1315   const uptr kAllocatedPagesCount = 1024;
1316   const uptr page_size = GetPageSizeCached();
1317   const uptr page_size_scaled = page_size >> Allocator::kCompactPtrScale;
1318   std::mt19937 r;
1319   uint32_t rnd_state = 42;
1320 
1321   for (uptr class_id = 1; class_id <= Allocator::SizeClassMapT::kLargestClassID;
1322       class_id++) {
1323     const uptr chunk_size = Allocator::SizeClassMapT::Size(class_id);
1324     const uptr chunk_size_scaled = chunk_size >> Allocator::kCompactPtrScale;
1325     const uptr max_chunks =
1326         kAllocatedPagesCount * GetPageSizeCached() / chunk_size;
1327 
1328     // Generate the random free list.
1329     std::vector<u32> free_array;
1330     bool in_free_range = false;
1331     uptr current_range_end = 0;
1332     for (uptr i = 0; i < max_chunks; i++) {
1333       if (i == current_range_end) {
1334         in_free_range = (my_rand_r(&rnd_state) & 1U) == 1;
1335         current_range_end += my_rand_r(&rnd_state) % 100 + 1;
1336       }
1337       if (in_free_range)
1338         free_array.push_back(i * chunk_size_scaled);
1339     }
1340     if (free_array.empty())
1341       continue;
1342     // Shuffle free_array to verify that ReleaseFreeMemoryToOS does not depend
1343     // on the list ordering.
1344     std::shuffle(free_array.begin(), free_array.end(), r);
1345 
1346     Allocator::ReleaseFreeMemoryToOS(&free_array[0], free_array.size(),
1347                                      chunk_size, kAllocatedPagesCount,
1348                                      &memory_mapper, class_id);
1349 
1350     // Verify that no released page is touched by a used chunk, and that every
1351     // range of free chunks big enough to contain entire memory pages had those
1352     // pages released.
1353     uptr verified_released_pages = 0;
1354     std::set<u32> free_chunks(free_array.begin(), free_array.end());
1355 
1356     u32 current_chunk = 0;
1357     in_free_range = false;
1358     u32 current_free_range_start = 0;
1359     for (uptr i = 0; i <= max_chunks; i++) {
1360       bool is_free_chunk = free_chunks.find(current_chunk) != free_chunks.end();
1361 
1362       if (is_free_chunk) {
1363         if (!in_free_range) {
1364           in_free_range = true;
1365           current_free_range_start = current_chunk;
1366         }
1367       } else {
1368         // Verify that this used chunk does not touch any released page.
1369         for (uptr i_page = current_chunk / page_size_scaled;
1370              i_page <= (current_chunk + chunk_size_scaled - 1) /
1371                        page_size_scaled;
1372              i_page++) {
1373           bool page_released =
1374               memory_mapper.reported_pages.find(i_page * page_size_scaled) !=
1375               memory_mapper.reported_pages.end();
1376           ASSERT_EQ(false, page_released);
1377         }
1378 
1379         if (in_free_range) {
1380           in_free_range = false;
1381           // Verify that every memory page entirely covered by this range of
1382           // free chunks was released.
1383           u32 page = RoundUpTo(current_free_range_start, page_size_scaled);
1384           while (page + page_size_scaled <= current_chunk) {
1385             bool page_released =
1386                 memory_mapper.reported_pages.find(page) !=
1387                 memory_mapper.reported_pages.end();
1388             ASSERT_EQ(true, page_released);
1389             verified_released_pages++;
1390             page += page_size_scaled;
1391           }
1392         }
1393       }
1394 
1395       current_chunk += chunk_size_scaled;
1396     }
1397 
1398     ASSERT_EQ(memory_mapper.reported_pages.size(), verified_released_pages);
1399   }
1400 }
1401 
1402 TEST(SanitizerCommon, SizeClassAllocator64ReleaseFreeMemoryToOS) {
1403   TestReleaseFreeMemoryToOS<Allocator64>();
1404 }
1405 
1406 #if !ALLOCATOR64_SMALL_SIZE
1407 TEST(SanitizerCommon, SizeClassAllocator64CompactReleaseFreeMemoryToOS) {
1408   TestReleaseFreeMemoryToOS<Allocator64Compact>();
1409 }
1410 
1411 TEST(SanitizerCommon, SizeClassAllocator64VeryCompactReleaseFreeMemoryToOS) {
1412   TestReleaseFreeMemoryToOS<Allocator64VeryCompact>();
1413 }
1414 #endif  // !ALLOCATOR64_SMALL_SIZE
1415 
1416 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
1417 
1418 TEST(SanitizerCommon, LowLevelAllocatorShouldRoundUpSizeOnAlloc) {
1419   // When allocating a memory block slightly bigger than a memory page and
1420   // LowLevelAllocator calls MmapOrDie for the internal buffer, it should round
1421   // the size up to the page size, so that subsequent calls to the allocator
1422   // can use the remaining space in the last allocated page.
1423   static LowLevelAllocator allocator;
1424   char *ptr1 = (char *)allocator.Allocate(GetPageSizeCached() + 16);
1425   char *ptr2 = (char *)allocator.Allocate(16);
1426   EXPECT_EQ(ptr2, ptr1 + GetPageSizeCached() + 16);
1427 }
1428 
1429 #endif  // #if !SANITIZER_DEBUG
1430