1 //===-- sanitizer_allocator_test.cpp --------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer/AddressSanitizer runtime.
10 // Tests for sanitizer_allocator.h.
11 //
12 //===----------------------------------------------------------------------===//
13 #include "sanitizer_common/sanitizer_allocator.h"
14 #include "sanitizer_common/sanitizer_allocator_internal.h"
15 #include "sanitizer_common/sanitizer_common.h"
16 
17 #include "sanitizer_test_utils.h"
18 #include "sanitizer_pthread_wrappers.h"
19 
20 #include "gtest/gtest.h"
21 
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <algorithm>
25 #include <vector>
26 #include <random>
27 #include <set>
28 
29 using namespace __sanitizer;
30 
31 #if SANITIZER_SOLARIS && defined(__sparcv9)
32 // FIXME: These tests probably fail because Solaris/sparcv9 uses the full
33 // 64-bit address space.  Needs more investigation.
34 #define SKIP_ON_SOLARIS_SPARCV9(x) DISABLED_##x
35 #else
36 #define SKIP_ON_SOLARIS_SPARCV9(x) x
37 #endif
38 
39 // Too slow for debug build
40 #if !SANITIZER_DEBUG
41 
42 #if SANITIZER_CAN_USE_ALLOCATOR64
43 #if SANITIZER_WINDOWS
44 // On Windows 64-bit there is no easy way to find a large enough fixed address
45 // space that is always available. Thus, a dynamically allocated address space
46 // is used instead (i.e. ~(uptr)0).
47 static const uptr kAllocatorSpace = ~(uptr)0;
48 static const uptr kAllocatorSize  =  0x8000000000ULL;  // 512G
49 static const u64 kAddressSpaceSize = 1ULL << 47;
50 typedef DefaultSizeClassMap SizeClassMap;
51 #elif SANITIZER_ANDROID && defined(__aarch64__)
52 static const uptr kAllocatorSpace = 0x3000000000ULL;
53 static const uptr kAllocatorSize  = 0x2000000000ULL;
54 static const u64 kAddressSpaceSize = 1ULL << 39;
55 typedef VeryCompactSizeClassMap SizeClassMap;
56 #else
57 static const uptr kAllocatorSpace = 0x700000000000ULL;
58 static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
59 static const u64 kAddressSpaceSize = 1ULL << 47;
60 typedef DefaultSizeClassMap SizeClassMap;
61 #endif
62 
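// The AP64* structs below are SizeClassAllocator64 parameter packs that differ
// only in base address (fixed kAllocatorSpace vs. dynamic ~(uptr)0), space
// size and SizeClassMap flavor; each is templated on an AddressSpaceView so
// the same tests can, in principle, also exercise non-local views.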
63 template <typename AddressSpaceViewTy>
64 struct AP64 {  // Allocator Params. Short name for shorter demangled names.
65   static const uptr kSpaceBeg = kAllocatorSpace;
66   static const uptr kSpaceSize = kAllocatorSize;
67   static const uptr kMetadataSize = 16;
68   typedef ::SizeClassMap SizeClassMap;
69   typedef NoOpMapUnmapCallback MapUnmapCallback;
70   static const uptr kFlags = 0;
71   using AddressSpaceView = AddressSpaceViewTy;
72 };
73 
74 template <typename AddressSpaceViewTy>
75 struct AP64Dyn {
76   static const uptr kSpaceBeg = ~(uptr)0;
77   static const uptr kSpaceSize = kAllocatorSize;
78   static const uptr kMetadataSize = 16;
79   typedef ::SizeClassMap SizeClassMap;
80   typedef NoOpMapUnmapCallback MapUnmapCallback;
81   static const uptr kFlags = 0;
82   using AddressSpaceView = AddressSpaceViewTy;
83 };
84 
85 template <typename AddressSpaceViewTy>
86 struct AP64Compact {
87   static const uptr kSpaceBeg = ~(uptr)0;
88   static const uptr kSpaceSize = kAllocatorSize;
89   static const uptr kMetadataSize = 16;
90   typedef CompactSizeClassMap SizeClassMap;
91   typedef NoOpMapUnmapCallback MapUnmapCallback;
92   static const uptr kFlags = 0;
93   using AddressSpaceView = AddressSpaceViewTy;
94 };
95 
96 template <typename AddressSpaceViewTy>
97 struct AP64VeryCompact {
98   static const uptr kSpaceBeg = ~(uptr)0;
99   static const uptr kSpaceSize = 1ULL << 37;
100   static const uptr kMetadataSize = 16;
101   typedef VeryCompactSizeClassMap SizeClassMap;
102   typedef NoOpMapUnmapCallback MapUnmapCallback;
103   static const uptr kFlags = 0;
104   using AddressSpaceView = AddressSpaceViewTy;
105 };
106 
107 template <typename AddressSpaceViewTy>
108 struct AP64Dense {
109   static const uptr kSpaceBeg = kAllocatorSpace;
110   static const uptr kSpaceSize = kAllocatorSize;
111   static const uptr kMetadataSize = 16;
112   typedef DenseSizeClassMap SizeClassMap;
113   typedef NoOpMapUnmapCallback MapUnmapCallback;
114   static const uptr kFlags = 0;
115   using AddressSpaceView = AddressSpaceViewTy;
116 };
117 
118 template <typename AddressSpaceView>
119 using Allocator64ASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
120 using Allocator64 = Allocator64ASVT<LocalAddressSpaceView>;
121 
122 template <typename AddressSpaceView>
123 using Allocator64DynamicASVT = SizeClassAllocator64<AP64Dyn<AddressSpaceView>>;
124 using Allocator64Dynamic = Allocator64DynamicASVT<LocalAddressSpaceView>;
125 
126 template <typename AddressSpaceView>
127 using Allocator64CompactASVT =
128     SizeClassAllocator64<AP64Compact<AddressSpaceView>>;
129 using Allocator64Compact = Allocator64CompactASVT<LocalAddressSpaceView>;
130 
131 template <typename AddressSpaceView>
132 using Allocator64VeryCompactASVT =
133     SizeClassAllocator64<AP64VeryCompact<AddressSpaceView>>;
134 using Allocator64VeryCompact =
135     Allocator64VeryCompactASVT<LocalAddressSpaceView>;
136 
137 template <typename AddressSpaceView>
138 using Allocator64DenseASVT = SizeClassAllocator64<AP64Dense<AddressSpaceView>>;
139 using Allocator64Dense = Allocator64DenseASVT<LocalAddressSpaceView>;
140 
141 #elif defined(__mips64)
142 static const u64 kAddressSpaceSize = 1ULL << 40;
143 #elif defined(__aarch64__)
144 static const u64 kAddressSpaceSize = 1ULL << 39;
145 #elif defined(__s390x__)
146 static const u64 kAddressSpaceSize = 1ULL << 53;
147 #elif defined(__s390__)
148 static const u64 kAddressSpaceSize = 1ULL << 31;
149 #else
150 static const u64 kAddressSpaceSize = 1ULL << 32;
151 #endif
152 
153 static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
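// I.e. 2^20 = 1MB regions on 32-bit hosts and 2^24 = 16MB regions on 64-bit
// hosts for the SizeClassAllocator32 configurations below.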
154 
155 template <typename AddressSpaceViewTy>
156 struct AP32Compact {
157   static const uptr kSpaceBeg = 0;
158   static const u64 kSpaceSize = kAddressSpaceSize;
159   static const uptr kMetadataSize = 16;
160   typedef CompactSizeClassMap SizeClassMap;
161   static const uptr kRegionSizeLog = ::kRegionSizeLog;
162   using AddressSpaceView = AddressSpaceViewTy;
163   typedef NoOpMapUnmapCallback MapUnmapCallback;
164   static const uptr kFlags = 0;
165 };
166 template <typename AddressSpaceView>
167 using Allocator32CompactASVT =
168     SizeClassAllocator32<AP32Compact<AddressSpaceView>>;
169 using Allocator32Compact = Allocator32CompactASVT<LocalAddressSpaceView>;
170 
171 template <class SizeClassMap>
172 void TestSizeClassMap() {
173   typedef SizeClassMap SCMap;
174   SCMap::Print();
175   SCMap::Validate();
176 }
177 
178 TEST(SanitizerCommon, DefaultSizeClassMap) {
179   TestSizeClassMap<DefaultSizeClassMap>();
180 }
181 
182 TEST(SanitizerCommon, CompactSizeClassMap) {
183   TestSizeClassMap<CompactSizeClassMap>();
184 }
185 
186 TEST(SanitizerCommon, VeryCompactSizeClassMap) {
187   TestSizeClassMap<VeryCompactSizeClassMap>();
188 }
189 
190 TEST(SanitizerCommon, InternalSizeClassMap) {
191   TestSizeClassMap<InternalSizeClassMap>();
192 }
193 
194 TEST(SanitizerCommon, DenseSizeClassMap) {
195   TestSizeClassMap<DenseSizeClassMap>();
196 }
197 
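// Generic stress test: allocates chunks of many sizes through an
// AllocatorCache, checks block boundaries, size classes and metadata, frees
// everything, and verifies that total memory usage stays stable across
// iterations.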
198 template <class Allocator>
199 void TestSizeClassAllocator(uptr premapped_heap = 0) {
200   Allocator *a = new Allocator;
201   a->Init(kReleaseToOSIntervalNever, premapped_heap);
202   typename Allocator::AllocatorCache cache;
203   memset(&cache, 0, sizeof(cache));
204   cache.Init(0);
205 
206   static const uptr sizes[] = {
207     1, 16,  30, 40, 100, 1000, 10000,
208     50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000
209   };
210 
211   std::vector<void *> allocated;
212 
213   uptr last_total_allocated = 0;
214   for (int i = 0; i < 3; i++) {
215     // Allocate a bunch of chunks.
216     for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
217       uptr size = sizes[s];
218       if (!a->CanAllocate(size, 1)) continue;
219       // printf("s = %ld\n", size);
220       uptr n_iter = std::max((uptr)6, 4000000 / size);
221       // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
222       for (uptr i = 0; i < n_iter; i++) {
223         uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
224         char *x = (char*)cache.Allocate(a, class_id0);
225         x[0] = 0;
226         x[size - 1] = 0;
227         x[size / 2] = 0;
228         allocated.push_back(x);
229         CHECK_EQ(x, a->GetBlockBegin(x));
230         CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
231         CHECK(a->PointerIsMine(x));
232         CHECK(a->PointerIsMine(x + size - 1));
233         CHECK(a->PointerIsMine(x + size / 2));
234         CHECK_GE(a->GetActuallyAllocatedSize(x), size);
235         uptr class_id = a->GetSizeClass(x);
236         CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
237         uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
238         metadata[0] = reinterpret_cast<uptr>(x) + 1;
239         metadata[1] = 0xABCD;
240       }
241     }
242     // Deallocate all.
243     for (uptr i = 0; i < allocated.size(); i++) {
244       void *x = allocated[i];
245       uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
246       CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
247       CHECK_EQ(metadata[1], 0xABCD);
248       cache.Deallocate(a, a->GetSizeClass(x), x);
249     }
250     allocated.clear();
251     uptr total_allocated = a->TotalMemoryUsed();
252     if (last_total_allocated == 0)
253       last_total_allocated = total_allocated;
254     CHECK_EQ(last_total_allocated, total_allocated);
255   }
256 
257   // Check that GetBlockBegin never crashes.
258   for (uptr x = 0, step = kAddressSpaceSize / 100000;
259        x < kAddressSpaceSize - step; x += step)
260     if (a->PointerIsMine(reinterpret_cast<void *>(x)))
261       Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));
262 
263   a->TestOnlyUnmap();
264   delete a;
265 }
266 
267 #if SANITIZER_CAN_USE_ALLOCATOR64
268 
269 // Maps 2 * kAllocatorSize bytes on construction so that a kAllocatorSize-aligned
270 // heap address is always available, and unmaps the whole mapping on destruction.
271 class ScopedPremappedHeap {
272  public:
273   ScopedPremappedHeap() {
274     BasePtr = MmapNoReserveOrDie(2 * kAllocatorSize, "preallocated heap");
275     AlignedAddr = RoundUpTo(reinterpret_cast<uptr>(BasePtr), kAllocatorSize);
276   }
277 
278   ~ScopedPremappedHeap() { UnmapOrDie(BasePtr, 2 * kAllocatorSize); }
279 
280   uptr Addr() { return AlignedAddr; }
281 
282  private:
283   void *BasePtr;
284   uptr AlignedAddr;
285 };
286 
287 // These tests can fail on Windows if memory is somewhat full and lit happens
288 // to run them all at the same time. FIXME: Make them not flaky and reenable.
289 #if !SANITIZER_WINDOWS
290 TEST(SanitizerCommon, SizeClassAllocator64) {
291   TestSizeClassAllocator<Allocator64>();
292 }
293 
294 TEST(SanitizerCommon, SizeClassAllocator64Dynamic) {
295   TestSizeClassAllocator<Allocator64Dynamic>();
296 }
297 
298 TEST(SanitizerCommon, SizeClassAllocator64DynamicPremapped) {
299   ScopedPremappedHeap h;
300   TestSizeClassAllocator<Allocator64Dynamic>(h.Addr());
301 }
302 
303 #if !SANITIZER_ANDROID
304 // FIXME(kostyak): find values so that these tests work on Android as well.
305 TEST(SanitizerCommon, SizeClassAllocator64Compact) {
306   TestSizeClassAllocator<Allocator64Compact>();
307 }
308 
309 TEST(SanitizerCommon, SizeClassAllocator64Dense) {
310   TestSizeClassAllocator<Allocator64Dense>();
311 }
312 #endif
313 
314 TEST(SanitizerCommon, SizeClassAllocator64VeryCompact) {
315   TestSizeClassAllocator<Allocator64VeryCompact>();
316 }
317 #endif
318 #endif
319 
320 TEST(SanitizerCommon, SizeClassAllocator32Compact) {
321   TestSizeClassAllocator<Allocator32Compact>();
322 }
323 
324 template <typename AddressSpaceViewTy>
325 struct AP32SeparateBatches {
326   static const uptr kSpaceBeg = 0;
327   static const u64 kSpaceSize = kAddressSpaceSize;
328   static const uptr kMetadataSize = 16;
329   typedef DefaultSizeClassMap SizeClassMap;
330   static const uptr kRegionSizeLog = ::kRegionSizeLog;
331   using AddressSpaceView = AddressSpaceViewTy;
332   typedef NoOpMapUnmapCallback MapUnmapCallback;
333   static const uptr kFlags =
334       SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
335 };
336 template <typename AddressSpaceView>
337 using Allocator32SeparateBatchesASVT =
338     SizeClassAllocator32<AP32SeparateBatches<AddressSpaceView>>;
339 using Allocator32SeparateBatches =
340     Allocator32SeparateBatchesASVT<LocalAddressSpaceView>;
341 
342 TEST(SanitizerCommon, SizeClassAllocator32SeparateBatches) {
343   TestSizeClassAllocator<Allocator32SeparateBatches>();
344 }
345 
346 template <class Allocator>
347 void SizeClassAllocatorMetadataStress(uptr premapped_heap = 0) {
348   Allocator *a = new Allocator;
349   a->Init(kReleaseToOSIntervalNever, premapped_heap);
350   typename Allocator::AllocatorCache cache;
351   memset(&cache, 0, sizeof(cache));
352   cache.Init(0);
353 
354   const uptr kNumAllocs = 1 << 13;
355   void *allocated[kNumAllocs];
356   void *meta[kNumAllocs];
357   for (uptr i = 0; i < kNumAllocs; i++) {
358     void *x = cache.Allocate(a, 1 + i % (Allocator::kNumClasses - 1));
359     allocated[i] = x;
360     meta[i] = a->GetMetaData(x);
361   }
362   // Get Metadata kNumAllocs^2 times.
363   for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
364     uptr idx = i % kNumAllocs;
365     void *m = a->GetMetaData(allocated[idx]);
366     EXPECT_EQ(m, meta[idx]);
367   }
368   for (uptr i = 0; i < kNumAllocs; i++) {
369     cache.Deallocate(a, 1 + i % (Allocator::kNumClasses - 1), allocated[i]);
370   }
371 
372   a->TestOnlyUnmap();
373   delete a;
374 }
375 
376 #if SANITIZER_CAN_USE_ALLOCATOR64
377 // These tests can fail on Windows if memory is somewhat full and lit happens
378 // to run them all at the same time. FIXME: Make them not flaky and reenable.
379 #if !SANITIZER_WINDOWS
380 TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
381   SizeClassAllocatorMetadataStress<Allocator64>();
382 }
383 
384 TEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) {
385   SizeClassAllocatorMetadataStress<Allocator64Dynamic>();
386 }
387 
388 TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedMetadataStress) {
389   ScopedPremappedHeap h;
390   SizeClassAllocatorMetadataStress<Allocator64Dynamic>(h.Addr());
391 }
392 
393 #if !SANITIZER_ANDROID
394 TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
395   SizeClassAllocatorMetadataStress<Allocator64Compact>();
396 }
397 #endif
398 
399 #endif
400 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
401 TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
402   SizeClassAllocatorMetadataStress<Allocator32Compact>();
403 }
404 
405 template <class Allocator>
406 void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize,
407                                            uptr premapped_heap = 0) {
408   Allocator *a = new Allocator;
409   a->Init(kReleaseToOSIntervalNever, premapped_heap);
410   typename Allocator::AllocatorCache cache;
411   memset(&cache, 0, sizeof(cache));
412   cache.Init(0);
413 
414   uptr max_size_class = Allocator::SizeClassMapT::kLargestClassID;
415   uptr size = Allocator::SizeClassMapT::Size(max_size_class);
416   // Make sure we correctly compute GetBlockBegin() w/o overflow.
417   for (size_t i = 0; i <= TotalSize / size; i++) {
418     void *x = cache.Allocate(a, max_size_class);
419     void *beg = a->GetBlockBegin(x);
420     // if ((i & (i - 1)) == 0)
421     //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
422     EXPECT_EQ(x, beg);
423   }
424 
425   a->TestOnlyUnmap();
426   delete a;
427 }
428 
429 #if SANITIZER_CAN_USE_ALLOCATOR64
430 // These tests can fail on Windows if memory is somewhat full and lit happens
431 // to run them all at the same time. FIXME: Make them not flaky and reenable.
432 #if !SANITIZER_WINDOWS
433 TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
434   SizeClassAllocatorGetBlockBeginStress<Allocator64>(
435       1ULL << (SANITIZER_ANDROID ? 31 : 33));
436 }
437 TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {
438   SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
439       1ULL << (SANITIZER_ANDROID ? 31 : 33));
440 }
441 TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedGetBlockBegin) {
442   ScopedPremappedHeap h;
443   SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
444       1ULL << (SANITIZER_ANDROID ? 31 : 33), h.Addr());
445 }
446 #if !SANITIZER_ANDROID
447 TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
448   SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
449 }
450 #endif
451 TEST(SanitizerCommon, SizeClassAllocator64VeryCompactGetBlockBegin) {
452   // The very compact allocator does not have > 4GB of space per size class.
453   SizeClassAllocatorGetBlockBeginStress<Allocator64VeryCompact>(1ULL << 31);
454 }
455 TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
456   SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>(1ULL << 33);
457 }
458 #endif
459 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
460 
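// Counting map/unmap callback used by the tests below to verify how many
// times an allocator maps and unmaps memory.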
461 struct TestMapUnmapCallback {
462   static int map_count, unmap_count;
463   void OnMap(uptr p, uptr size) const { map_count++; }
464   void OnUnmap(uptr p, uptr size) const { unmap_count++; }
465 };
466 int TestMapUnmapCallback::map_count;
467 int TestMapUnmapCallback::unmap_count;
468 
469 #if SANITIZER_CAN_USE_ALLOCATOR64
470 // These tests can fail on Windows if memory is somewhat full and lit happens
471 // to run them all at the same time. FIXME: Make them not flaky and reenable.
472 #if !SANITIZER_WINDOWS
473 
474 template <typename AddressSpaceViewTy = LocalAddressSpaceView>
475 struct AP64WithCallback {
476   static const uptr kSpaceBeg = kAllocatorSpace;
477   static const uptr kSpaceSize = kAllocatorSize;
478   static const uptr kMetadataSize = 16;
479   typedef ::SizeClassMap SizeClassMap;
480   typedef TestMapUnmapCallback MapUnmapCallback;
481   static const uptr kFlags = 0;
482   using AddressSpaceView = AddressSpaceViewTy;
483 };
484 
485 TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
486   TestMapUnmapCallback::map_count = 0;
487   TestMapUnmapCallback::unmap_count = 0;
488   typedef SizeClassAllocator64<AP64WithCallback<>> Allocator64WithCallBack;
489   Allocator64WithCallBack *a = new Allocator64WithCallBack;
490   a->Init(kReleaseToOSIntervalNever);
491   EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
492   typename Allocator64WithCallBack::AllocatorCache cache;
493   memset(&cache, 0, sizeof(cache));
494   cache.Init(0);
495   AllocatorStats stats;
496   stats.Init();
497   const size_t kNumChunks = 128;
498   uint32_t chunks[kNumChunks];
499   a->GetFromAllocator(&stats, 30, chunks, kNumChunks);
500   // State + alloc + metadata + freearray.
501   EXPECT_EQ(TestMapUnmapCallback::map_count, 4);
502   a->TestOnlyUnmap();
503   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
504   delete a;
505 }
506 #endif
507 #endif
508 
509 template <typename AddressSpaceViewTy = LocalAddressSpaceView>
510 struct AP32WithCallback {
511   static const uptr kSpaceBeg = 0;
512   static const u64 kSpaceSize = kAddressSpaceSize;
513   static const uptr kMetadataSize = 16;
514   typedef CompactSizeClassMap SizeClassMap;
515   static const uptr kRegionSizeLog = ::kRegionSizeLog;
516   using AddressSpaceView = AddressSpaceViewTy;
517   typedef TestMapUnmapCallback MapUnmapCallback;
518   static const uptr kFlags = 0;
519 };
520 
521 TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
522   TestMapUnmapCallback::map_count = 0;
523   TestMapUnmapCallback::unmap_count = 0;
524   typedef SizeClassAllocator32<AP32WithCallback<>> Allocator32WithCallBack;
525   Allocator32WithCallBack *a = new Allocator32WithCallBack;
526   a->Init(kReleaseToOSIntervalNever);
527   EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
528   Allocator32WithCallBack::AllocatorCache cache;
529   memset(&cache, 0, sizeof(cache));
530   cache.Init(0);
531   AllocatorStats stats;
532   stats.Init();
533   a->AllocateBatch(&stats, &cache, 32);
534   EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
535   a->TestOnlyUnmap();
536   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
537   delete a;
538   // fprintf(stderr, "Map: %d Unmap: %d\n",
539   //         TestMapUnmapCallback::map_count,
540   //         TestMapUnmapCallback::unmap_count);
541 }
542 
543 TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
544   TestMapUnmapCallback::map_count = 0;
545   TestMapUnmapCallback::unmap_count = 0;
546   LargeMmapAllocator<TestMapUnmapCallback> a;
547   a.Init();
548   AllocatorStats stats;
549   stats.Init();
550   void *x = a.Allocate(&stats, 1 << 20, 1);
551   EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
552   a.Deallocate(&stats, x);
553   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
554 }
555 
556 // Don't test OOM conditions on Win64 because it causes other tests on the same
557 // machine to OOM.
558 #if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
559 TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
560   Allocator64 a;
561   a.Init(kReleaseToOSIntervalNever);
562   Allocator64::AllocatorCache cache;
563   memset(&cache, 0, sizeof(cache));
564   cache.Init(0);
565   AllocatorStats stats;
566   stats.Init();
567 
568   const size_t kNumChunks = 128;
569   uint32_t chunks[kNumChunks];
570   bool allocation_failed = false;
571   for (int i = 0; i < 1000000; i++) {
572     if (!a.GetFromAllocator(&stats, 52, chunks, kNumChunks)) {
573       allocation_failed = true;
574       break;
575     }
576   }
577   EXPECT_EQ(allocation_failed, true);
578 
579   a.TestOnlyUnmap();
580 }
581 #endif
582 
583 TEST(SanitizerCommon, LargeMmapAllocator) {
584   LargeMmapAllocator<NoOpMapUnmapCallback> a;
585   a.Init();
586   AllocatorStats stats;
587   stats.Init();
588 
589   static const int kNumAllocs = 1000;
590   char *allocated[kNumAllocs];
591   static const uptr size = 4000;
592   // Allocate some.
593   for (int i = 0; i < kNumAllocs; i++) {
594     allocated[i] = (char *)a.Allocate(&stats, size, 1);
595     CHECK(a.PointerIsMine(allocated[i]));
596   }
597   // Deallocate all.
598   CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
599   for (int i = 0; i < kNumAllocs; i++) {
600     char *p = allocated[i];
601     CHECK(a.PointerIsMine(p));
602     a.Deallocate(&stats, p);
603   }
604   // Check that none are left.
605   CHECK_EQ(a.TotalMemoryUsed(), 0);
606 
607   // Allocate some more, also add metadata.
608   for (int i = 0; i < kNumAllocs; i++) {
609     char *x = (char *)a.Allocate(&stats, size, 1);
610     CHECK_GE(a.GetActuallyAllocatedSize(x), size);
611     uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
612     *meta = i;
613     allocated[i] = x;
614   }
615   for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
616     char *p = allocated[i % kNumAllocs];
617     CHECK(a.PointerIsMine(p));
618     CHECK(a.PointerIsMine(p + 2000));
619   }
620   CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
621   // Deallocate all in reverse order.
622   for (int i = 0; i < kNumAllocs; i++) {
623     int idx = kNumAllocs - i - 1;
624     char *p = allocated[idx];
625     uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
626     CHECK_EQ(*meta, idx);
627     CHECK(a.PointerIsMine(p));
628     a.Deallocate(&stats, p);
629   }
630   CHECK_EQ(a.TotalMemoryUsed(), 0);
631 
632   // Test alignments. Test with up to 256MB alignment on 64-bit non-Windows machines.
633   // Windows doesn't overcommit, and many machines do not have ~25.6GB of swap.
634   uptr max_alignment =
635       (SANITIZER_WORDSIZE == 64 && !SANITIZER_WINDOWS) ? (1 << 28) : (1 << 24);
636   for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
637     const uptr kNumAlignedAllocs = 100;
638     for (uptr i = 0; i < kNumAlignedAllocs; i++) {
639       uptr size = ((i % 10) + 1) * 4096;
640       char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
641       CHECK_EQ(p, a.GetBlockBegin(p));
642       CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
643       CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
644       CHECK_EQ(0, (uptr)allocated[i] % alignment);
645       p[0] = p[size - 1] = 0;
646     }
647     for (uptr i = 0; i < kNumAlignedAllocs; i++) {
648       a.Deallocate(&stats, allocated[i]);
649     }
650   }
651 
652   // Regression test for boundary condition in GetBlockBegin().
653   uptr page_size = GetPageSizeCached();
654   char *p = (char *)a.Allocate(&stats, page_size, 1);
655   CHECK_EQ(p, a.GetBlockBegin(p));
656   CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
657   CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
658   a.Deallocate(&stats, p);
659 }
660 
661 template <class PrimaryAllocator>
662 void TestCombinedAllocator(uptr premapped_heap = 0) {
663   typedef CombinedAllocator<PrimaryAllocator> Allocator;
664   Allocator *a = new Allocator;
665   a->Init(kReleaseToOSIntervalNever, premapped_heap);
666   std::mt19937 r;
667 
668   typename Allocator::AllocatorCache cache;
669   memset(&cache, 0, sizeof(cache));
670   a->InitCache(&cache);
671 
672   EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
673   EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
674   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
675   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
676   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
677   EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
678 
679   const uptr kNumAllocs = 100000;
680   const uptr kNumIter = 10;
681   for (uptr iter = 0; iter < kNumIter; iter++) {
682     std::vector<void*> allocated;
683     for (uptr i = 0; i < kNumAllocs; i++) {
684       uptr size = (i % (1 << 14)) + 1;
685       if ((i % 1024) == 0)
686         size = 1 << (10 + (i % 14));
687       void *x = a->Allocate(&cache, size, 1);
688       uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
689       CHECK_EQ(*meta, 0);
690       *meta = size;
691       allocated.push_back(x);
692     }
693 
694     std::shuffle(allocated.begin(), allocated.end(), r);
695 
696     // Test ForEachChunk(...)
697     {
698       std::set<void *> reported_chunks;
699       auto cb = [](uptr chunk, void *arg) {
700         auto reported_chunks_ptr = reinterpret_cast<std::set<void *> *>(arg);
701         auto pair =
702             reported_chunks_ptr->insert(reinterpret_cast<void *>(chunk));
703         // Check chunk is never reported more than once.
704         ASSERT_TRUE(pair.second);
705       };
706       a->ForEachChunk(cb, reinterpret_cast<void *>(&reported_chunks));
707       for (const auto &allocated_ptr : allocated) {
708         ASSERT_NE(reported_chunks.find(allocated_ptr), reported_chunks.end());
709       }
710     }
711 
712     for (uptr i = 0; i < kNumAllocs; i++) {
713       void *x = allocated[i];
714       uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
715       CHECK_NE(*meta, 0);
716       CHECK(a->PointerIsMine(x));
717       *meta = 0;
718       a->Deallocate(&cache, x);
719     }
720     allocated.clear();
721     a->SwallowCache(&cache);
722   }
723   a->DestroyCache(&cache);
724   a->TestOnlyUnmap();
725 }
726 
727 #if SANITIZER_CAN_USE_ALLOCATOR64
728 TEST(SanitizerCommon, CombinedAllocator64) {
729   TestCombinedAllocator<Allocator64>();
730 }
731 
732 TEST(SanitizerCommon, CombinedAllocator64Dynamic) {
733   TestCombinedAllocator<Allocator64Dynamic>();
734 }
735 
736 TEST(SanitizerCommon, CombinedAllocator64DynamicPremapped) {
737   ScopedPremappedHeap h;
738   TestCombinedAllocator<Allocator64Dynamic>(h.Addr());
739 }
740 
741 #if !SANITIZER_ANDROID
742 TEST(SanitizerCommon, CombinedAllocator64Compact) {
743   TestCombinedAllocator<Allocator64Compact>();
744 }
745 #endif
746 
747 TEST(SanitizerCommon, CombinedAllocator64VeryCompact) {
748   TestCombinedAllocator<Allocator64VeryCompact>();
749 }
750 #endif
751 
752 TEST(SanitizerCommon, SKIP_ON_SOLARIS_SPARCV9(CombinedAllocator32Compact)) {
753   TestCombinedAllocator<Allocator32Compact>();
754 }
755 
756 template <class Allocator>
757 void TestSizeClassAllocatorLocalCache(uptr premapped_heap = 0) {
758   using AllocatorCache = typename Allocator::AllocatorCache;
759   AllocatorCache cache;
760   Allocator *a = new Allocator();
761 
762   a->Init(kReleaseToOSIntervalNever, premapped_heap);
763   memset(&cache, 0, sizeof(cache));
764   cache.Init(0);
765 
766   const uptr kNumAllocs = 10000;
767   const int kNumIter = 100;
768   uptr saved_total = 0;
769   for (int class_id = 1; class_id <= 5; class_id++) {
770     for (int it = 0; it < kNumIter; it++) {
771       void *allocated[kNumAllocs];
772       for (uptr i = 0; i < kNumAllocs; i++) {
773         allocated[i] = cache.Allocate(a, class_id);
774       }
775       for (uptr i = 0; i < kNumAllocs; i++) {
776         cache.Deallocate(a, class_id, allocated[i]);
777       }
778       cache.Drain(a);
779       uptr total_allocated = a->TotalMemoryUsed();
780       if (it)
781         CHECK_EQ(saved_total, total_allocated);
782       saved_total = total_allocated;
783     }
784   }
785 
786   a->TestOnlyUnmap();
787   delete a;
788 }
789 
790 #if SANITIZER_CAN_USE_ALLOCATOR64
791 // These tests can fail on Windows if memory is somewhat full and lit happens
792 // to run them all at the same time. FIXME: Make them not flaky and reenable.
793 #if !SANITIZER_WINDOWS
794 TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
795   TestSizeClassAllocatorLocalCache<Allocator64>();
796 }
797 
798 TEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) {
799   TestSizeClassAllocatorLocalCache<Allocator64Dynamic>();
800 }
801 
802 TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedLocalCache) {
803   ScopedPremappedHeap h;
804   TestSizeClassAllocatorLocalCache<Allocator64Dynamic>(h.Addr());
805 }
806 
807 #if !SANITIZER_ANDROID
808 TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
809   TestSizeClassAllocatorLocalCache<Allocator64Compact>();
810 }
811 #endif
812 TEST(SanitizerCommon, SizeClassAllocator64VeryCompactLocalCache) {
813   TestSizeClassAllocatorLocalCache<Allocator64VeryCompact>();
814 }
815 #endif
816 #endif
817 
818 TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
819   TestSizeClassAllocatorLocalCache<Allocator32Compact>();
820 }
821 
822 #if SANITIZER_CAN_USE_ALLOCATOR64
823 typedef Allocator64::AllocatorCache AllocatorCache;
824 static AllocatorCache static_allocator_cache;
825 
826 void *AllocatorLeakTestWorker(void *arg) {
827   typedef AllocatorCache::Allocator Allocator;
828   Allocator *a = (Allocator*)(arg);
829   static_allocator_cache.Allocate(a, 10);
830   static_allocator_cache.Drain(a);
831   return 0;
832 }
833 
834 TEST(SanitizerCommon, AllocatorLeakTest) {
835   typedef AllocatorCache::Allocator Allocator;
836   Allocator a;
837   a.Init(kReleaseToOSIntervalNever);
838   uptr total_used_memory = 0;
839   for (int i = 0; i < 100; i++) {
840     pthread_t t;
841     PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a);
842     PTHREAD_JOIN(t, 0);
843     if (i == 0)
844       total_used_memory = a.TotalMemoryUsed();
845     EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
846   }
847 
848   a.TestOnlyUnmap();
849 }
850 
851 // Struct which is allocated to pass info to new threads.  The new thread frees
852 // it.
853 struct NewThreadParams {
854   AllocatorCache *thread_cache;
855   AllocatorCache::Allocator *allocator;
856   uptr class_id;
857 };
858 
859 // Called in a new thread.  Just frees its argument.
860 static void *DeallocNewThreadWorker(void *arg) {
861   NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
862   params->thread_cache->Deallocate(params->allocator, params->class_id, params);
863   return NULL;
864 }
865 
866 // The allocator cache is supposed to be POD and zero initialized.  We should be
867 // able to call Deallocate on a zeroed cache, and it will self-initialize.
868 TEST(Allocator, AllocatorCacheDeallocNewThread) {
869   AllocatorCache::Allocator allocator;
870   allocator.Init(kReleaseToOSIntervalNever);
871   AllocatorCache main_cache;
872   AllocatorCache child_cache;
873   memset(&main_cache, 0, sizeof(main_cache));
874   memset(&child_cache, 0, sizeof(child_cache));
875 
876   uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
877   NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
878       main_cache.Allocate(&allocator, class_id));
879   params->thread_cache = &child_cache;
880   params->allocator = &allocator;
881   params->class_id = class_id;
882   pthread_t t;
883   PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params);
884   PTHREAD_JOIN(t, 0);
885 
886   allocator.TestOnlyUnmap();
887 }
888 #endif
889 
890 TEST(Allocator, Basic) {
891   char *p = (char*)InternalAlloc(10);
892   EXPECT_NE(p, (char*)0);
893   char *p2 = (char*)InternalAlloc(20);
894   EXPECT_NE(p2, (char*)0);
895   EXPECT_NE(p2, p);
896   InternalFree(p);
897   InternalFree(p2);
898 }
899 
900 TEST(Allocator, Stress) {
901   const int kCount = 1000;
902   char *ptrs[kCount];
903   unsigned rnd = 42;
904   for (int i = 0; i < kCount; i++) {
905     uptr sz = my_rand_r(&rnd) % 1000;
906     char *p = (char*)InternalAlloc(sz);
907     EXPECT_NE(p, (char*)0);
908     ptrs[i] = p;
909   }
910   for (int i = 0; i < kCount; i++) {
911     InternalFree(ptrs[i]);
912   }
913 }
914 
915 TEST(Allocator, LargeAlloc) {
916   void *p = InternalAlloc(10 << 20);
917   InternalFree(p);
918 }
919 
920 TEST(Allocator, ScopedBuffer) {
921   const int kSize = 512;
922   {
923     InternalMmapVector<int> int_buf(kSize);
924     EXPECT_EQ((uptr)kSize, int_buf.size());
925   }
926   InternalMmapVector<char> char_buf(kSize);
927   EXPECT_EQ((uptr)kSize, char_buf.size());
928   internal_memset(char_buf.data(), 'c', kSize);
929   for (int i = 0; i < kSize; i++) {
930     EXPECT_EQ('c', char_buf[i]);
931   }
932 }
933 
934 void IterationTestCallback(uptr chunk, void *arg) {
935   reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
936 }
937 
938 template <class Allocator>
939 void TestSizeClassAllocatorIteration(uptr premapped_heap = 0) {
940   Allocator *a = new Allocator;
941   a->Init(kReleaseToOSIntervalNever, premapped_heap);
942   typename Allocator::AllocatorCache cache;
943   memset(&cache, 0, sizeof(cache));
944   cache.Init(0);
945 
946   static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
947     50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};
948 
949   std::vector<void *> allocated;
950 
951   // Allocate a bunch of chunks.
952   for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
953     uptr size = sizes[s];
954     if (!a->CanAllocate(size, 1)) continue;
955     // printf("s = %ld\n", size);
956     uptr n_iter = std::max((uptr)6, 80000 / size);
957     // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
958     for (uptr j = 0; j < n_iter; j++) {
959       uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
960       void *x = cache.Allocate(a, class_id0);
961       allocated.push_back(x);
962     }
963   }
964 
965   std::set<uptr> reported_chunks;
966   a->ForceLock();
967   a->ForEachChunk(IterationTestCallback, &reported_chunks);
968   a->ForceUnlock();
969 
970   for (uptr i = 0; i < allocated.size(); i++) {
971     // Don't use EXPECT_NE. Reporting the first mismatch is enough.
972     ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
973               reported_chunks.end());
974   }
975 
976   a->TestOnlyUnmap();
977   delete a;
978 }
979 
980 #if SANITIZER_CAN_USE_ALLOCATOR64
981 // These tests can fail on Windows if memory is somewhat full and lit happens
982 // to run them all at the same time. FIXME: Make them not flaky and reenable.
983 #if !SANITIZER_WINDOWS
984 TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
985   TestSizeClassAllocatorIteration<Allocator64>();
986 }
987 TEST(SanitizerCommon, SizeClassAllocator64DynamicIteration) {
988   TestSizeClassAllocatorIteration<Allocator64Dynamic>();
989 }
990 TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedIteration) {
991   ScopedPremappedHeap h;
992   TestSizeClassAllocatorIteration<Allocator64Dynamic>(h.Addr());
993 }
994 #endif
995 #endif
996 
997 TEST(SanitizerCommon, SKIP_ON_SOLARIS_SPARCV9(SizeClassAllocator32Iteration)) {
998   TestSizeClassAllocatorIteration<Allocator32Compact>();
999 }
1000 
1001 TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
1002   LargeMmapAllocator<NoOpMapUnmapCallback> a;
1003   a.Init();
1004   AllocatorStats stats;
1005   stats.Init();
1006 
1007   static const uptr kNumAllocs = 1000;
1008   char *allocated[kNumAllocs];
1009   static const uptr size = 40;
1010   // Allocate some.
1011   for (uptr i = 0; i < kNumAllocs; i++)
1012     allocated[i] = (char *)a.Allocate(&stats, size, 1);
1013 
1014   std::set<uptr> reported_chunks;
1015   a.ForceLock();
1016   a.ForEachChunk(IterationTestCallback, &reported_chunks);
1017   a.ForceUnlock();
1018 
1019   for (uptr i = 0; i < kNumAllocs; i++) {
1020     // Don't use EXPECT_NE. Reporting the first mismatch is enough.
1021     ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
1022               reported_chunks.end());
1023   }
1024   for (uptr i = 0; i < kNumAllocs; i++)
1025     a.Deallocate(&stats, allocated[i]);
1026 }
1027 
1028 TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
1029   LargeMmapAllocator<NoOpMapUnmapCallback> a;
1030   a.Init();
1031   AllocatorStats stats;
1032   stats.Init();
1033 
1034   static const uptr kNumAllocs = 1024;
1035   static const uptr kNumExpectedFalseLookups = 10000000;
1036   char *allocated[kNumAllocs];
1037   static const uptr size = 4096;
1038   // Allocate some.
1039   for (uptr i = 0; i < kNumAllocs; i++) {
1040     allocated[i] = (char *)a.Allocate(&stats, size, 1);
1041   }
1042 
1043   a.ForceLock();
1044   for (uptr i = 0; i < kNumAllocs  * kNumAllocs; i++) {
1045     // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
1046     char *p1 = allocated[i % kNumAllocs];
1047     EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
1048     EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
1049     EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
1050     EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
1051   }
1052 
1053   for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
1054     void *p = reinterpret_cast<void *>(i % 1024);
1055     EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
1056     p = reinterpret_cast<void *>(~0L - (i % 1024));
1057     EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
1058   }
1059   a.ForceUnlock();
1060 
1061   for (uptr i = 0; i < kNumAllocs; i++)
1062     a.Deallocate(&stats, allocated[i]);
1063 }
1064 
1065 
1066 // Don't test OOM conditions on Win64 because it causes other tests on the same
1067 // machine to OOM.
1068 #if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
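// Note: the SizeClassMap parameters here are assumed to be <kNumBits,
// kMinSizeLog, kMidSizeLog, kMaxSizeLog, kMaxNumCachedHint,
// kMaxBytesCachedLog>; the unusually large kMaxSizeLog = 38 makes the biggest
// chunks comparable in size to a region, which is what this OOM test needs.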
1069 typedef __sanitizer::SizeClassMap<3, 4, 8, 38, 128, 16> SpecialSizeClassMap;
1070 template <typename AddressSpaceViewTy = LocalAddressSpaceView>
1071 struct AP64_SpecialSizeClassMap {
1072   static const uptr kSpaceBeg = kAllocatorSpace;
1073   static const uptr kSpaceSize = kAllocatorSize;
1074   static const uptr kMetadataSize = 0;
1075   typedef SpecialSizeClassMap SizeClassMap;
1076   typedef NoOpMapUnmapCallback MapUnmapCallback;
1077   static const uptr kFlags = 0;
1078   using AddressSpaceView = AddressSpaceViewTy;
1079 };
1080 
1081 // Regression test for out-of-memory condition in PopulateFreeList().
1082 TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
1083   // In a world where regions are small and chunks are huge...
1084   typedef SizeClassAllocator64<AP64_SpecialSizeClassMap<>> SpecialAllocator64;
1085   const uptr kRegionSize =
1086       kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
1087   SpecialAllocator64 *a = new SpecialAllocator64;
1088   a->Init(kReleaseToOSIntervalNever);
1089   SpecialAllocator64::AllocatorCache cache;
1090   memset(&cache, 0, sizeof(cache));
1091   cache.Init(0);
1092 
1093   // ...one man is on a mission to overflow a region with a series of
1094   // successive allocations.
1095 
1096   const uptr kClassID = 107;
1097   const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
1098   ASSERT_LT(2 * kAllocationSize, kRegionSize);
1099   ASSERT_GT(3 * kAllocationSize, kRegionSize);
1100   EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
1101   EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
1102   EXPECT_EQ(cache.Allocate(a, kClassID), nullptr);
1103 
1104   const uptr Class2 = 100;
1105   const uptr Size2 = SpecialSizeClassMap::Size(Class2);
1106   ASSERT_EQ(Size2 * 8, kRegionSize);
1107   char *p[7];
1108   for (int i = 0; i < 7; i++) {
1109     p[i] = (char*)cache.Allocate(a, Class2);
1110     EXPECT_NE(p[i], nullptr);
1111     fprintf(stderr, "p[%d] %p s = %lx\n", i, (void*)p[i], Size2);
1112     p[i][Size2 - 1] = 42;
1113     if (i) ASSERT_LT(p[i - 1], p[i]);
1114   }
1115   EXPECT_EQ(cache.Allocate(a, Class2), nullptr);
1116   cache.Deallocate(a, Class2, p[0]);
1117   cache.Drain(a);
1118   ASSERT_EQ(p[6][Size2 - 1], 42);
1119   a->TestOnlyUnmap();
1120   delete a;
1121 }
1122 
1123 #endif
1124 
1125 #if SANITIZER_CAN_USE_ALLOCATOR64
1126 
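// Mock memory mappers for the PackedCounterArray tests: NoMemoryMapper only
// records the requested buffer size without mapping anything, while
// RedZoneMemoryMapper hands out a single zeroed page surrounded by
// inaccessible guard pages to catch out-of-bounds buffer accesses.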
1127 class NoMemoryMapper {
1128  public:
1129   uptr last_request_buffer_size;
1130 
1131   NoMemoryMapper() : last_request_buffer_size(0) {}
1132 
1133   void *MapPackedCounterArrayBuffer(uptr buffer_size) {
1134     last_request_buffer_size = buffer_size;
1135     return nullptr;
1136   }
1137   void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {}
1138 };
1139 
1140 class RedZoneMemoryMapper {
1141  public:
1142   RedZoneMemoryMapper() {
1143     const auto page_size = GetPageSize();
1144     buffer = MmapOrDie(3ULL * page_size, "");
1145     MprotectNoAccess(reinterpret_cast<uptr>(buffer), page_size);
1146     MprotectNoAccess(reinterpret_cast<uptr>(buffer) + page_size * 2, page_size);
1147   }
1148   ~RedZoneMemoryMapper() {
1149     UnmapOrDie(buffer, 3 * GetPageSize());
1150   }
1151 
1152   void *MapPackedCounterArrayBuffer(uptr buffer_size) {
1153     const auto page_size = GetPageSize();
1154     CHECK_EQ(buffer_size, page_size);
1155     void *p =
1156         reinterpret_cast<void *>(reinterpret_cast<uptr>(buffer) + page_size);
1157     memset(p, 0, page_size);
1158     return p;
1159   }
1160   void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {}
1161 
1162  private:
1163   void *buffer;
1164 };
1165 
1166 TEST(SanitizerCommon, SizeClassAllocator64PackedCounterArray) {
1167   NoMemoryMapper no_memory_mapper;
1168   typedef Allocator64::PackedCounterArray<NoMemoryMapper>
1169       NoMemoryPackedCounterArray;
1170 
1171   for (int i = 0; i < 64; i++) {
1172     // Various valid counter max values packed into one word.
1173     NoMemoryPackedCounterArray counters_2n(1, 1ULL << i, &no_memory_mapper);
1174     EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);
1175 
1176     // Check the "all bit set" values too.
1177     NoMemoryPackedCounterArray counters_2n1_1(1, ~0ULL >> i, &no_memory_mapper);
1178     EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);
1179 
1180     // Verify the packing ratio; each counter is expected to be packed into the
1181     // nearest power-of-two number of bits.
1182     NoMemoryPackedCounterArray counters(64, 1ULL << i, &no_memory_mapper);
1183     EXPECT_EQ(8ULL * RoundUpToPowerOfTwo(i + 1),
1184               no_memory_mapper.last_request_buffer_size);
1185   }
1186 
1187   RedZoneMemoryMapper memory_mapper;
1188   typedef Allocator64::PackedCounterArray<RedZoneMemoryMapper>
1189       RedZonePackedCounterArray;
1190   // Go through 1, 2, 4, 8, .. 64 bits per counter.
1191   for (int i = 0; i < 7; i++) {
1192     // Make sure counters request one memory page for the buffer.
1193     const u64 kNumCounters = (GetPageSize() / 8) * (64 >> i);
1194     RedZonePackedCounterArray counters(kNumCounters,
1195                                        1ULL << ((1 << i) - 1),
1196                                        &memory_mapper);
1197     counters.Inc(0);
1198     for (u64 c = 1; c < kNumCounters - 1; c++) {
1199       ASSERT_EQ(0ULL, counters.Get(c));
1200       counters.Inc(c);
1201       ASSERT_EQ(1ULL, counters.Get(c - 1));
1202     }
1203     ASSERT_EQ(0ULL, counters.Get(kNumCounters - 1));
1204     counters.Inc(kNumCounters - 1);
1205 
1206     if (i > 0) {
1207       counters.IncRange(0, kNumCounters - 1);
1208       for (u64 c = 0; c < kNumCounters; c++)
1209         ASSERT_EQ(2ULL, counters.Get(c));
1210     }
1211   }
1212 }
1213 
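// Records the ranges released by FreePagesRangeTracker as a string of '.'
// (kept) and 'x' (released) pages so they can be compared against the
// expected patterns below.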
1214 class RangeRecorder {
1215  public:
1216   std::string reported_pages;
1217 
1218   RangeRecorder()
1219       : page_size_scaled_log(
1220             Log2(GetPageSizeCached() >> Allocator64::kCompactPtrScale)),
1221         last_page_reported(0) {}
1222 
1223   void ReleasePageRangeToOS(u32 from, u32 to) {
1224     from >>= page_size_scaled_log;
1225     to >>= page_size_scaled_log;
1226     ASSERT_LT(from, to);
1227     if (!reported_pages.empty())
1228       ASSERT_LT(last_page_reported, from);
1229     reported_pages.append(from - last_page_reported, '.');
1230     reported_pages.append(to - from, 'x');
1231     last_page_reported = to;
1232   }
1233  private:
1234   const uptr page_size_scaled_log;
1235   u32 last_page_reported;
1236 };
1237 
1238 TEST(SanitizerCommon, SizeClassAllocator64FreePagesRangeTracker) {
1239   typedef Allocator64::FreePagesRangeTracker<RangeRecorder> RangeTracker;
1240 
1241   // 'x' denotes a page to be released, '.' denotes a page to be kept around.
1242   const char* test_cases[] = {
1243       "",
1244       ".",
1245       "x",
1246       "........",
1247       "xxxxxxxxxxx",
1248       "..............xxxxx",
1249       "xxxxxxxxxxxxxxxxxx.....",
1250       "......xxxxxxxx........",
1251       "xxx..........xxxxxxxxxxxxxxx",
1252       "......xxxx....xxxx........",
1253       "xxx..........xxxxxxxx....xxxxxxx",
1254       "x.x.x.x.x.x.x.x.x.x.x.x.",
1255       ".x.x.x.x.x.x.x.x.x.x.x.x",
1256       ".x.x.x.x.x.x.x.x.x.x.x.x.",
1257       "x.x.x.x.x.x.x.x.x.x.x.x.x",
1258   };
1259 
1260   for (auto test_case : test_cases) {
1261     RangeRecorder range_recorder;
1262     RangeTracker tracker(&range_recorder);
1263     for (int i = 0; test_case[i] != 0; i++)
1264       tracker.NextPage(test_case[i] == 'x');
1265     tracker.Done();
1266     // Strip trailing '.'-pages before comparing the results as they are not
1267     // going to be reported to range_recorder anyway.
1268     const char* last_x = strrchr(test_case, 'x');
1269     std::string expected(
1270         test_case,
1271         last_x == nullptr ? 0 : (last_x - test_case + 1));
1272     EXPECT_STREQ(expected.c_str(), range_recorder.reported_pages.c_str());
1273   }
1274 }
1275 
1276 class ReleasedPagesTrackingMemoryMapper {
1277  public:
1278   std::set<u32> reported_pages;
1279 
1280   void *MapPackedCounterArrayBuffer(uptr buffer_size) {
1281     reported_pages.clear();
1282     return calloc(1, buffer_size);
1283   }
1284   void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {
1285     free(buffer);
1286   }
1287 
1288   void ReleasePageRangeToOS(u32 from, u32 to) {
1289     uptr page_size_scaled =
1290         GetPageSizeCached() >> Allocator64::kCompactPtrScale;
1291     for (u32 i = from; i < to; i += page_size_scaled)
1292       reported_pages.insert(i);
1293   }
1294 };
1295 
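// Builds a random free list for each size class, runs ReleaseFreeMemoryToOS()
// with ReleasedPagesTrackingMemoryMapper, and cross-checks that exactly the
// fully-free pages (and no pages touched by used chunks) were reported as
// released.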
1296 template <class Allocator>
1297 void TestReleaseFreeMemoryToOS() {
1298   ReleasedPagesTrackingMemoryMapper memory_mapper;
1299   const uptr kAllocatedPagesCount = 1024;
1300   const uptr page_size = GetPageSizeCached();
1301   const uptr page_size_scaled = page_size >> Allocator::kCompactPtrScale;
1302   std::mt19937 r;
1303   uint32_t rnd_state = 42;
1304 
1305   for (uptr class_id = 1; class_id <= Allocator::SizeClassMapT::kLargestClassID;
1306       class_id++) {
1307     const uptr chunk_size = Allocator::SizeClassMapT::Size(class_id);
1308     const uptr chunk_size_scaled = chunk_size >> Allocator::kCompactPtrScale;
1309     const uptr max_chunks =
1310         kAllocatedPagesCount * GetPageSizeCached() / chunk_size;
1311 
1312     // Generate the random free list.
1313     std::vector<u32> free_array;
1314     bool in_free_range = false;
1315     uptr current_range_end = 0;
1316     for (uptr i = 0; i < max_chunks; i++) {
1317       if (i == current_range_end) {
1318         in_free_range = (my_rand_r(&rnd_state) & 1U) == 1;
1319         current_range_end += my_rand_r(&rnd_state) % 100 + 1;
1320       }
1321       if (in_free_range)
1322         free_array.push_back(i * chunk_size_scaled);
1323     }
1324     if (free_array.empty())
1325       continue;
1326     // Shuffle free_list to verify that ReleaseFreeMemoryToOS does not depend on
1327     // the list ordering.
1328     std::shuffle(free_array.begin(), free_array.end(), r);
1329 
1330     Allocator::ReleaseFreeMemoryToOS(&free_array[0], free_array.size(),
1331                                      chunk_size, kAllocatedPagesCount,
1332                                      &memory_mapper);
1333 
1334     // Verify that no released page is touched by a used chunk, and that every
1335     // range of free chunks large enough to cover entire memory pages had those
1336     // pages released.
1337     uptr verified_released_pages = 0;
1338     std::set<u32> free_chunks(free_array.begin(), free_array.end());
1339 
1340     u32 current_chunk = 0;
1341     in_free_range = false;
1342     u32 current_free_range_start = 0;
1343     for (uptr i = 0; i <= max_chunks; i++) {
1344       bool is_free_chunk = free_chunks.find(current_chunk) != free_chunks.end();
1345 
1346       if (is_free_chunk) {
1347         if (!in_free_range) {
1348           in_free_range = true;
1349           current_free_range_start = current_chunk;
1350         }
1351       } else {
1352         // Verify that this used chunk does not touch any released page.
1353         for (uptr i_page = current_chunk / page_size_scaled;
1354              i_page <= (current_chunk + chunk_size_scaled - 1) /
1355                        page_size_scaled;
1356              i_page++) {
1357           bool page_released =
1358               memory_mapper.reported_pages.find(i_page * page_size_scaled) !=
1359               memory_mapper.reported_pages.end();
1360           ASSERT_EQ(false, page_released);
1361         }
1362 
1363         if (in_free_range) {
1364           in_free_range = false;
1365           // Verify that all entire memory pages covered by this range of free
1366           // chunks were released.
1367           u32 page = RoundUpTo(current_free_range_start, page_size_scaled);
1368           while (page + page_size_scaled <= current_chunk) {
1369             bool page_released =
1370                 memory_mapper.reported_pages.find(page) !=
1371                 memory_mapper.reported_pages.end();
1372             ASSERT_EQ(true, page_released);
1373             verified_released_pages++;
1374             page += page_size_scaled;
1375           }
1376         }
1377       }
1378 
1379       current_chunk += chunk_size_scaled;
1380     }
1381 
1382     ASSERT_EQ(memory_mapper.reported_pages.size(), verified_released_pages);
1383   }
1384 }
1385 
1386 TEST(SanitizerCommon, SizeClassAllocator64ReleaseFreeMemoryToOS) {
1387   TestReleaseFreeMemoryToOS<Allocator64>();
1388 }
1389 
1390 #if !SANITIZER_ANDROID
1391 TEST(SanitizerCommon, SizeClassAllocator64CompactReleaseFreeMemoryToOS) {
1392   TestReleaseFreeMemoryToOS<Allocator64Compact>();
1393 }
1394 
1395 TEST(SanitizerCommon, SizeClassAllocator64VeryCompactReleaseFreeMemoryToOS) {
1396   TestReleaseFreeMemoryToOS<Allocator64VeryCompact>();
1397 }
1398 #endif  // !SANITIZER_ANDROID
1399 
1400 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
1401 
1402 TEST(SanitizerCommon, TwoLevelByteMap) {
1403   const u64 kSize1 = 1 << 6, kSize2 = 1 << 12;
1404   const u64 n = kSize1 * kSize2;
1405   TwoLevelByteMap<kSize1, kSize2> m;
1406   m.Init();
1407   for (u64 i = 0; i < n; i += 7) {
1408     m.set(i, (i % 100) + 1);
1409   }
1410   for (u64 j = 0; j < n; j++) {
1411     if (j % 7)
1412       EXPECT_EQ(m[j], 0);
1413     else
1414       EXPECT_EQ(m[j], (j % 100) + 1);
1415   }
1416 
1417   m.TestOnlyUnmap();
1418 }
1419 
1420 template <typename AddressSpaceView>
1421 using TestByteMapASVT =
1422     TwoLevelByteMap<1 << 12, 1 << 13, AddressSpaceView, TestMapUnmapCallback>;
1423 using TestByteMap = TestByteMapASVT<LocalAddressSpaceView>;
1424 
1425 struct TestByteMapParam {
1426   TestByteMap *m;
1427   size_t shard;
1428   size_t num_shards;
1429 };
1430 
1431 void *TwoLevelByteMapUserThread(void *param) {
1432   TestByteMapParam *p = (TestByteMapParam*)param;
1433   for (size_t i = p->shard; i < p->m->size(); i += p->num_shards) {
1434     size_t val = (i % 100) + 1;
1435     p->m->set(i, val);
1436     EXPECT_EQ((*p->m)[i], val);
1437   }
1438   return 0;
1439 }
1440 
1441 TEST(SanitizerCommon, ThreadedTwoLevelByteMap) {
1442   TestByteMap m;
1443   m.Init();
1444   TestMapUnmapCallback::map_count = 0;
1445   TestMapUnmapCallback::unmap_count = 0;
1446   static const int kNumThreads = 4;
1447   pthread_t t[kNumThreads];
1448   TestByteMapParam p[kNumThreads];
1449   for (int i = 0; i < kNumThreads; i++) {
1450     p[i].m = &m;
1451     p[i].shard = i;
1452     p[i].num_shards = kNumThreads;
1453     PTHREAD_CREATE(&t[i], 0, TwoLevelByteMapUserThread, &p[i]);
1454   }
1455   for (int i = 0; i < kNumThreads; i++) {
1456     PTHREAD_JOIN(t[i], 0);
1457   }
1458   EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
1459   EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, 0UL);
1460   m.TestOnlyUnmap();
1461   EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
1462   EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, m.size1());
1463 }
1464 
1465 TEST(SanitizerCommon, LowLevelAllocatorShouldRoundUpSizeOnAlloc) {
1466   // When allocating a memory block slightly bigger than a memory page and
1467   // LowLevelAllocator calls MmapOrDie for the internal buffer, it should round
1468   // the size up to the page size, so that subsequent calls to the allocator
1469   // can use the remaining space in the last allocated page.
1470   static LowLevelAllocator allocator;
1471   char *ptr1 = (char *)allocator.Allocate(GetPageSizeCached() + 16);
1472   char *ptr2 = (char *)allocator.Allocate(16);
1473   EXPECT_EQ(ptr2, ptr1 + GetPageSizeCached() + 16);
1474 }
1475 
1476 #endif  // #if !SANITIZER_DEBUG
1477