//===-- sanitizer_allocator_test.cpp --------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of the ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"

#include "sanitizer_test_utils.h"
#include "sanitizer_pthread_wrappers.h"

#include "gtest/gtest.h"

#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <vector>
#include <random>
#include <set>

using namespace __sanitizer;

#if SANITIZER_SOLARIS && defined(__sparcv9)
// FIXME: These tests probably fail because Solaris/sparcv9 uses the full
// 64-bit address space. Needs more investigation.
#define SKIP_ON_SOLARIS_SPARCV9(x) DISABLED_##x
#else
#define SKIP_ON_SOLARIS_SPARCV9(x) x
#endif

// Too slow for debug build.
#if !SANITIZER_DEBUG

#if SANITIZER_CAN_USE_ALLOCATOR64
#if SANITIZER_WINDOWS
// On Windows 64-bit there is no easy way to find a large enough fixed address
// space that is always available. Thus, a dynamically allocated address space
// is used instead (i.e. ~(uptr)0).
static const uptr kAllocatorSpace = ~(uptr)0;
static const uptr kAllocatorSize = 0x8000000000ULL;  // 512G
static const u64 kAddressSpaceSize = 1ULL << 47;
typedef DefaultSizeClassMap SizeClassMap;
#elif SANITIZER_ANDROID && defined(__aarch64__)
static const uptr kAllocatorSpace = 0x3000000000ULL;
static const uptr kAllocatorSize = 0x2000000000ULL;
static const u64 kAddressSpaceSize = 1ULL << 39;
typedef VeryCompactSizeClassMap SizeClassMap;
#else
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;
typedef DefaultSizeClassMap SizeClassMap;
#endif
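
// Allocator parameter structs for SizeClassAllocator64. Each AP64* variant
// below fixes the placement of the allocator space (a fixed base vs. the
// dynamic ~(uptr)0 base), the per-chunk metadata size, the size class map,
// the map/unmap callback, and the address space view to use.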
template <typename AddressSpaceViewTy>
struct AP64 {  // Allocator Params. Short name for shorter demangled names.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef ::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceViewTy>
struct AP64Dyn {
  static const uptr kSpaceBeg = ~(uptr)0;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef ::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceViewTy>
struct AP64Compact {
  static const uptr kSpaceBeg = ~(uptr)0;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef CompactSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceViewTy>
struct AP64VeryCompact {
  static const uptr kSpaceBeg = ~(uptr)0;
  static const uptr kSpaceSize = 1ULL << 37;
  static const uptr kMetadataSize = 16;
  typedef VeryCompactSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceViewTy>
struct AP64Dense {
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef DenseSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceView>
using Allocator64ASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
using Allocator64 = Allocator64ASVT<LocalAddressSpaceView>;

template <typename AddressSpaceView>
using Allocator64DynamicASVT = SizeClassAllocator64<AP64Dyn<AddressSpaceView>>;
using Allocator64Dynamic = Allocator64DynamicASVT<LocalAddressSpaceView>;

template <typename AddressSpaceView>
using Allocator64CompactASVT =
    SizeClassAllocator64<AP64Compact<AddressSpaceView>>;
using Allocator64Compact = Allocator64CompactASVT<LocalAddressSpaceView>;

template <typename AddressSpaceView>
using Allocator64VeryCompactASVT =
    SizeClassAllocator64<AP64VeryCompact<AddressSpaceView>>;
using Allocator64VeryCompact =
    Allocator64VeryCompactASVT<LocalAddressSpaceView>;

template <typename AddressSpaceView>
using Allocator64DenseASVT = SizeClassAllocator64<AP64Dense<AddressSpaceView>>;
using Allocator64Dense = Allocator64DenseASVT<LocalAddressSpaceView>;

#elif defined(__mips64)
static const u64 kAddressSpaceSize = 1ULL << 40;
#elif defined(__aarch64__)
static const u64 kAddressSpaceSize = 1ULL << 39;
#elif defined(__s390x__)
static const u64 kAddressSpaceSize = 1ULL << 53;
#elif defined(__s390__)
static const u64 kAddressSpaceSize = 1ULL << 31;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);

template <typename AddressSpaceViewTy>
struct AP32Compact {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = kAddressSpaceSize;
  static const uptr kMetadataSize = 16;
  typedef CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = ::kRegionSizeLog;
  using AddressSpaceView = AddressSpaceViewTy;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
template <typename AddressSpaceView>
using Allocator32CompactASVT =
    SizeClassAllocator32<AP32Compact<AddressSpaceView>>;
using Allocator32Compact = Allocator32CompactASVT<LocalAddressSpaceView>;

template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  SCMap::Print();
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

TEST(SanitizerCommon, VeryCompactSizeClassMap) {
  TestSizeClassMap<VeryCompactSizeClassMap>();
}

TEST(SanitizerCommon, InternalSizeClassMap) {
  TestSizeClassMap<InternalSizeClassMap>();
}

TEST(SanitizerCommon, DenseSizeClassMap) {
  TestSizeClassMap<DenseSizeClassMap>();
}
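
// A minimal round-trip sanity check (illustrative sketch, not a substitute
// for Validate()): a size class must be large enough to hold any size that
// maps to it. Only ClassID() and Size(), already used elsewhere in this file,
// are assumed.
TEST(SanitizerCommon, DefaultSizeClassMapRoundTrip) {
  for (uptr size = 1; size <= 10000; size += 7) {
    uptr class_id = DefaultSizeClassMap::ClassID(size);
    // The chosen class must fit the requested size.
    CHECK_GE(DefaultSizeClassMap::Size(class_id), size);
  }
}

// TestSizeClassAllocator exercises a primary allocator end to end: it
// allocates chunks of many sizes through a local cache, checks
// GetBlockBegin()/PointerIsMine()/GetSizeClass() and metadata consistency,
// frees everything, and verifies that TotalMemoryUsed() stays stable across
// repeated rounds.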
template <class Allocator>
void TestSizeClassAllocator(uptr premapped_heap = 0) {
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever, premapped_heap);
  typename Allocator::AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {
    1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000
  };

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 4000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        x[0] = 0;
        x[size - 1] = 0;
        x[size / 2] = 0;
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64

// Maps a kAllocatorSize-aligned region of kAllocatorSize bytes on
// construction and unmaps it on destruction.
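// Mapping 2 * kAllocatorSize and rounding the base address up guarantees that
// a kAllocatorSize-aligned block of kAllocatorSize bytes fits inside the
// mapping no matter where mmap places it.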
class ScopedPremappedHeap {
 public:
  ScopedPremappedHeap() {
    BasePtr = MmapNoReserveOrDie(2 * kAllocatorSize, "preallocated heap");
    AlignedAddr = RoundUpTo(reinterpret_cast<uptr>(BasePtr), kAllocatorSize);
  }

  ~ScopedPremappedHeap() { UnmapOrDie(BasePtr, 2 * kAllocatorSize); }

  uptr Addr() { return AlignedAddr; }

 private:
  void *BasePtr;
  uptr AlignedAddr;
};

// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Dynamic) {
  TestSizeClassAllocator<Allocator64Dynamic>();
}

#if !SANITIZER_ANDROID
// Android only has 39-bit address space, so mapping 2 * kAllocatorSize
// sometimes fails.
TEST(SanitizerCommon, SizeClassAllocator64DynamicPremapped) {
  ScopedPremappedHeap h;
  TestSizeClassAllocator<Allocator64Dynamic>(h.Addr());
}

// FIXME(kostyak): find values so that these work on Android as well.
TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}

TEST(SanitizerCommon, SizeClassAllocator64Dense) {
  TestSizeClassAllocator<Allocator64Dense>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator64VeryCompact) {
  TestSizeClassAllocator<Allocator64VeryCompact>();
}
#endif
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}

template <typename AddressSpaceViewTy>
struct AP32SeparateBatches {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = kAddressSpaceSize;
  static const uptr kMetadataSize = 16;
  typedef DefaultSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = ::kRegionSizeLog;
  using AddressSpaceView = AddressSpaceViewTy;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
};
template <typename AddressSpaceView>
using Allocator32SeparateBatchesASVT =
    SizeClassAllocator32<AP32SeparateBatches<AddressSpaceView>>;
using Allocator32SeparateBatches =
    Allocator32SeparateBatchesASVT<LocalAddressSpaceView>;

TEST(SanitizerCommon, SizeClassAllocator32SeparateBatches) {
  TestSizeClassAllocator<Allocator32SeparateBatches>();
}
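
// Allocates a chunk from every size class and checks that GetMetaData()
// returns the same, stable pointer for a chunk no matter how many times it
// is queried.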
template <class Allocator>
void SizeClassAllocatorMetadataStress(uptr premapped_heap = 0) {
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever, premapped_heap);
  typename Allocator::AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 1 << 13;
  void *allocated[kNumAllocs];
  void *meta[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % (Allocator::kNumClasses - 1));
    allocated[i] = x;
    meta[i] = a->GetMetaData(x);
  }
  // Get Metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    uptr idx = i % kNumAllocs;
    void *m = a->GetMetaData(allocated[idx]);
    EXPECT_EQ(m, meta[idx]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % (Allocator::kNumClasses - 1), allocated[i]);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Dynamic>();
}

#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedMetadataStress) {
  ScopedPremappedHeap h;
  SizeClassAllocatorMetadataStress<Allocator64Dynamic>(h.Addr());
}

TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif

#endif
#endif  // SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}
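
// Repeatedly allocates chunks of the largest size class until roughly
// TotalSize bytes have been handed out, and checks that GetBlockBegin() maps
// every chunk back to its own start, i.e. that the computation does not
// overflow for high addresses.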
template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize,
                                           uptr premapped_heap = 0) {
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever, premapped_heap);
  typename Allocator::AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  uptr max_size_class = Allocator::SizeClassMapT::kLargestClassID;
  uptr size = Allocator::SizeClassMapT::Size(max_size_class);
  // Make sure we correctly compute GetBlockBegin() w/o overflow.
  for (size_t i = 0; i <= TotalSize / size; i++) {
    void *x = cache.Allocate(a, max_size_class);
    void *beg = a->GetBlockBegin(x);
    // if ((i & (i - 1)) == 0)
    //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
    EXPECT_EQ(x, beg);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64>(
      1ULL << (SANITIZER_ANDROID ? 31 : 33));
}
TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
      1ULL << (SANITIZER_ANDROID ? 31 : 33));
}
#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedGetBlockBegin) {
  ScopedPremappedHeap h;
  SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
      1ULL << (SANITIZER_ANDROID ? 31 : 33), h.Addr());
}
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
}
#endif
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactGetBlockBegin) {
  // Does not have > 4Gb for each class.
  SizeClassAllocatorGetBlockBeginStress<Allocator64VeryCompact>(1ULL << 31);
}
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>(1ULL << 33);
}
#endif
#endif  // SANITIZER_CAN_USE_ALLOCATOR64
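
// TestMapUnmapCallback counts OnMap()/OnUnmap() invocations; the tests below
// plug it into the 64-bit, 32-bit and large-mmap allocators and check that
// mapping and unmapping events fire when expected.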
struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS

template <typename AddressSpaceViewTy = LocalAddressSpaceView>
struct AP64WithCallback {
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef ::SizeClassMap SizeClassMap;
  typedef TestMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<AP64WithCallback<>> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init(kReleaseToOSIntervalNever);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  typename Allocator64WithCallBack::AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  const size_t kNumChunks = 128;
  uint32_t chunks[kNumChunks];
  a->GetFromAllocator(&stats, 30, chunks, kNumChunks);
  // State + alloc + metadata + freearray.
  EXPECT_EQ(TestMapUnmapCallback::map_count, 4);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif
#endif

template <typename AddressSpaceViewTy = LocalAddressSpaceView>
struct AP32WithCallback {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = kAddressSpaceSize;
  static const uptr kMetadataSize = 16;
  typedef CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = ::kRegionSizeLog;
  using AddressSpaceView = AddressSpaceViewTy;
  typedef TestMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};

TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<AP32WithCallback<>> Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init(kReleaseToOSIntervalNever);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
  Allocator32WithCallBack::AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}

TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}

// Don't test OOM conditions on Win64 because it causes other tests on the same
// machine to OOM.
#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  Allocator64 a;
  a.Init(kReleaseToOSIntervalNever);
  Allocator64::AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();

  const size_t kNumChunks = 128;
  uint32_t chunks[kNumChunks];
  bool allocation_failed = false;
  for (int i = 0; i < 1000000; i++) {
    if (!a.GetFromAllocator(&stats, 52, chunks, kNumChunks)) {
      allocation_failed = true;
      break;
    }
  }
  EXPECT_EQ(allocation_failed, true);

  a.TestOnlyUnmap();
}
#endif

TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<NoOpMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  // Allocate some.
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  // Deallocate all.
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that none are left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test alignments. Test with up to 256MB alignment on 64-bit non-Windows
  // machines. Windows doesn't overcommit, and many machines do not have
  // 25.6GB of swap.
  uptr max_alignment =
      (SANITIZER_WORDSIZE == 64 && !SANITIZER_WINDOWS) ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }

  // Regression test for boundary condition in GetBlockBegin().
  uptr page_size = GetPageSizeCached();
  char *p = (char *)a.Allocate(&stats, page_size, 1);
  CHECK_EQ(p, a.GetBlockBegin(p));
  CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
  CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
  a.Deallocate(&stats, p);
}
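
// TestCombinedAllocator drives the combined (primary + secondary) allocator:
// impossible sizes must return null, every chunk carries writable metadata,
// ForEachChunk() must report each live chunk exactly once, and the cache is
// swallowed back between iterations.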
template <class PrimaryAllocator>
void TestCombinedAllocator(uptr premapped_heap = 0) {
  typedef CombinedAllocator<PrimaryAllocator> Allocator;
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever, premapped_heap);
  std::mt19937 r;

  typename Allocator::AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);

  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
      if ((i % 1024) == 0)
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    std::shuffle(allocated.begin(), allocated.end(), r);

    // Test ForEachChunk(...)
    {
      std::set<void *> reported_chunks;
      auto cb = [](uptr chunk, void *arg) {
        auto reported_chunks_ptr = reinterpret_cast<std::set<void *> *>(arg);
        auto pair =
            reported_chunks_ptr->insert(reinterpret_cast<void *>(chunk));
        // Check chunk is never reported more than once.
        ASSERT_TRUE(pair.second);
      };
      a->ForEachChunk(cb, reinterpret_cast<void *>(&reported_chunks));
      for (const auto &allocated_ptr : allocated) {
        ASSERT_NE(reported_chunks.find(allocated_ptr), reported_chunks.end());
      }
    }

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  a->TestOnlyUnmap();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64>();
}

TEST(SanitizerCommon, CombinedAllocator64Dynamic) {
  TestCombinedAllocator<Allocator64Dynamic>();
}

#if !SANITIZER_ANDROID
#if !SANITIZER_WINDOWS
// Windows fails to map 1TB, so disable this test.
TEST(SanitizerCommon, CombinedAllocator64DynamicPremapped) {
  ScopedPremappedHeap h;
  TestCombinedAllocator<Allocator64Dynamic>(h.Addr());
}
#endif

TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, CombinedAllocator64VeryCompact) {
  TestCombinedAllocator<Allocator64VeryCompact>();
}
#endif

TEST(SanitizerCommon, SKIP_ON_SOLARIS_SPARCV9(CombinedAllocator32Compact)) {
  TestCombinedAllocator<Allocator32Compact>();
}
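
// TestSizeClassAllocatorLocalCache repeatedly fills and drains a local cache
// for several size classes and checks that TotalMemoryUsed() does not grow
// once the first iteration has warmed the allocator up.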
template <class Allocator>
void TestSizeClassAllocatorLocalCache(uptr premapped_heap = 0) {
  using AllocatorCache = typename Allocator::AllocatorCache;
  AllocatorCache cache;
  Allocator *a = new Allocator();

  a->Init(kReleaseToOSIntervalNever, premapped_heap);
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 10000;
  const int kNumIter = 100;
  uptr saved_total = 0;
  for (int class_id = 1; class_id <= 5; class_id++) {
    for (int it = 0; it < kNumIter; it++) {
      void *allocated[kNumAllocs];
      for (uptr i = 0; i < kNumAllocs; i++) {
        allocated[i] = cache.Allocate(a, class_id);
      }
      for (uptr i = 0; i < kNumAllocs; i++) {
        cache.Deallocate(a, class_id, allocated[i]);
      }
      cache.Drain(a);
      uptr total_allocated = a->TotalMemoryUsed();
      if (it)
        CHECK_EQ(saved_total, total_allocated);
      saved_total = total_allocated;
    }
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) {
  TestSizeClassAllocatorLocalCache<Allocator64Dynamic>();
}

#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedLocalCache) {
  ScopedPremappedHeap h;
  TestSizeClassAllocatorLocalCache<Allocator64Dynamic>(h.Addr());
}

TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<Allocator64Compact>();
}
#endif
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactLocalCache) {
  TestSizeClassAllocatorLocalCache<Allocator64VeryCompact>();
}
#endif
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<Allocator32Compact>();
}
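
// The next two tests share a static AllocatorCache across short-lived
// threads: AllocatorLeakTest checks that repeated allocate+drain cycles do
// not grow memory usage, and AllocatorCacheDeallocNewThread checks that a
// zero-initialized cache self-initializes on its first Deallocate() call.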
#if SANITIZER_CAN_USE_ALLOCATOR64
typedef Allocator64::AllocatorCache AllocatorCache;
static AllocatorCache static_allocator_cache;

void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}

TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init(kReleaseToOSIntervalNever);
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a);
    PTHREAD_JOIN(t, 0);
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}

// Struct which is allocated to pass info to new threads. The new thread frees
// it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread. Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(params->allocator, params->class_id, params);
  return NULL;
}

// The allocator cache is supposed to be POD and zero initialized. We should be
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init(kReleaseToOSIntervalNever);
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params);
  PTHREAD_JOIN(t, 0);

  allocator.TestOnlyUnmap();
}
#endif
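
// The Allocator.* tests below exercise the sanitizers' internal allocation
// interface: InternalAlloc/InternalFree and the InternalMmapVector container.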
TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  InternalFree(p);
  InternalFree(p2);
}

TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  unsigned rnd = 42;
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}

TEST(Allocator, LargeAlloc) {
  void *p = InternalAlloc(10 << 20);
  InternalFree(p);
}

TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalMmapVector<int> int_buf(kSize);
    EXPECT_EQ((uptr)kSize, int_buf.size());
  }
  InternalMmapVector<char> char_buf(kSize);
  EXPECT_EQ((uptr)kSize, char_buf.size());
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}

void IterationTestCallback(uptr chunk, void *arg) {
  reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
}
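
// TestSizeClassAllocatorIteration allocates chunks of many sizes and then,
// under ForceLock(), checks that ForEachChunk() reports every live chunk.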
template <class Allocator>
void TestSizeClassAllocatorIteration(uptr premapped_heap = 0) {
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever, premapped_heap);
  typename Allocator::AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  // Allocate a bunch of chunks.
  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
    uptr size = sizes[s];
    if (!a->CanAllocate(size, 1)) continue;
    // printf("s = %ld\n", size);
    uptr n_iter = std::max((uptr)6, 80000 / size);
    // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
    for (uptr j = 0; j < n_iter; j++) {
      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
      void *x = cache.Allocate(a, class_id0);
      allocated.push_back(x);
    }
  }

  std::set<uptr> reported_chunks;
  a->ForceLock();
  a->ForEachChunk(IterationTestCallback, &reported_chunks);
  a->ForceUnlock();

  for (uptr i = 0; i < allocated.size(); i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
  TestSizeClassAllocatorIteration<Allocator64>();
}
TEST(SanitizerCommon, SizeClassAllocator64DynamicIteration) {
  TestSizeClassAllocatorIteration<Allocator64Dynamic>();
}
#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedIteration) {
  ScopedPremappedHeap h;
  TestSizeClassAllocatorIteration<Allocator64Dynamic>(h.Addr());
}
#endif
#endif
#endif

TEST(SanitizerCommon, SKIP_ON_SOLARIS_SPARCV9(SizeClassAllocator32Iteration)) {
  TestSizeClassAllocatorIteration<Allocator32Compact>();
}

TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
  LargeMmapAllocator<NoOpMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 40;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++)
    allocated[i] = (char *)a.Allocate(&stats, size, 1);

  std::set<uptr> reported_chunks;
  a.ForceLock();
  a.ForEachChunk(IterationTestCallback, &reported_chunks);
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }
  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}

TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
  LargeMmapAllocator<NoOpMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1024;
  static const uptr kNumExpectedFalseLookups = 10000000;
  char *allocated[kNumAllocs];
  static const uptr size = 4096;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
  }

  a.ForceLock();
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
    char *p1 = allocated[i % kNumAllocs];
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
  }

  for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
    void *p = reinterpret_cast<void *>(i % 1024);
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
    p = reinterpret_cast<void *>(~0L - (i % 1024));
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
  }
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}

// Don't test OOM conditions on Win64 because it causes other tests on the same
// machine to OOM.
#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
typedef __sanitizer::SizeClassMap<3, 4, 8, 38, 128, 16> SpecialSizeClassMap;
template <typename AddressSpaceViewTy = LocalAddressSpaceView>
struct AP64_SpecialSizeClassMap {
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 0;
  typedef SpecialSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
  // In a world where regions are small and chunks are huge...
  typedef SizeClassAllocator64<AP64_SpecialSizeClassMap<>> SpecialAllocator64;
  const uptr kRegionSize =
      kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
  SpecialAllocator64 *a = new SpecialAllocator64;
  a->Init(kReleaseToOSIntervalNever);
  SpecialAllocator64::AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  // ...one man is on a mission to overflow a region with a series of
  // successive allocations.

  const uptr kClassID = 107;
  const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
  ASSERT_LT(2 * kAllocationSize, kRegionSize);
  ASSERT_GT(3 * kAllocationSize, kRegionSize);
  EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
  EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
  EXPECT_EQ(cache.Allocate(a, kClassID), nullptr);

  const uptr Class2 = 100;
  const uptr Size2 = SpecialSizeClassMap::Size(Class2);
  ASSERT_EQ(Size2 * 8, kRegionSize);
  char *p[7];
  for (int i = 0; i < 7; i++) {
    p[i] = (char*)cache.Allocate(a, Class2);
    EXPECT_NE(p[i], nullptr);
    fprintf(stderr, "p[%d] %p s = %lx\n", i, (void*)p[i], Size2);
    p[i][Size2 - 1] = 42;
    if (i) ASSERT_LT(p[i - 1], p[i]);
  }
  EXPECT_EQ(cache.Allocate(a, Class2), nullptr);
  cache.Deallocate(a, Class2, p[0]);
  cache.Drain(a);
  ASSERT_EQ(p[6][Size2 - 1], 42);
  a->TestOnlyUnmap();
  delete a;
}

#endif

#if SANITIZER_CAN_USE_ALLOCATOR64

class NoMemoryMapper {
 public:
  uptr last_request_buffer_size;

  NoMemoryMapper() : last_request_buffer_size(0) {}

  void *MapPackedCounterArrayBuffer(uptr buffer_size) {
    last_request_buffer_size = buffer_size;
    return nullptr;
  }
  void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {}
};

class RedZoneMemoryMapper {
 public:
  RedZoneMemoryMapper() {
    const auto page_size = GetPageSize();
    buffer = MmapOrDie(3ULL * page_size, "");
    MprotectNoAccess(reinterpret_cast<uptr>(buffer), page_size);
    MprotectNoAccess(reinterpret_cast<uptr>(buffer) + page_size * 2, page_size);
  }
  ~RedZoneMemoryMapper() {
    UnmapOrDie(buffer, 3 * GetPageSize());
  }

  void *MapPackedCounterArrayBuffer(uptr buffer_size) {
    const auto page_size = GetPageSize();
    CHECK_EQ(buffer_size, page_size);
    void *p =
        reinterpret_cast<void *>(reinterpret_cast<uptr>(buffer) + page_size);
    memset(p, 0, page_size);
    return p;
  }
  void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {}

 private:
  void *buffer;
};
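
// The packed counter array stores each counter in the smallest power-of-2
// number of bits that can hold its maximum value. For example, a maximum
// value of 4 needs 3 bits, which the test below expects to round up to 4 bits
// per counter: 16 counters then share one 64-bit word, and 64 counters need
// 8 * 4 = 32 bytes of buffer.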
TEST(SanitizerCommon, SizeClassAllocator64PackedCounterArray) {
  NoMemoryMapper no_memory_mapper;
  typedef Allocator64::PackedCounterArray<NoMemoryMapper>
      NoMemoryPackedCounterArray;

  for (int i = 0; i < 64; i++) {
    // Various valid counter max values, packed into one word.
    NoMemoryPackedCounterArray counters_2n(1, 1ULL << i, &no_memory_mapper);
    EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);

    // Check the "all bits set" values too.
    NoMemoryPackedCounterArray counters_2n1_1(1, ~0ULL >> i, &no_memory_mapper);
    EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);

    // Verify the packing ratio: each counter is expected to be packed into
    // the closest power-of-2 number of bits.
    NoMemoryPackedCounterArray counters(64, 1ULL << i, &no_memory_mapper);
    EXPECT_EQ(8ULL * RoundUpToPowerOfTwo(i + 1),
              no_memory_mapper.last_request_buffer_size);
  }

  RedZoneMemoryMapper memory_mapper;
  typedef Allocator64::PackedCounterArray<RedZoneMemoryMapper>
      RedZonePackedCounterArray;
  // Go through 1, 2, 4, 8, .. 64 bits per counter.
  for (int i = 0; i < 7; i++) {
    // Make sure counters request one memory page for the buffer.
    const u64 kNumCounters = (GetPageSize() / 8) * (64 >> i);
    RedZonePackedCounterArray counters(kNumCounters,
                                       1ULL << ((1 << i) - 1),
                                       &memory_mapper);
    counters.Inc(0);
    for (u64 c = 1; c < kNumCounters - 1; c++) {
      ASSERT_EQ(0ULL, counters.Get(c));
      counters.Inc(c);
      ASSERT_EQ(1ULL, counters.Get(c - 1));
    }
    ASSERT_EQ(0ULL, counters.Get(kNumCounters - 1));
    counters.Inc(kNumCounters - 1);

    if (i > 0) {
      counters.IncRange(0, kNumCounters - 1);
      for (u64 c = 0; c < kNumCounters; c++)
        ASSERT_EQ(2ULL, counters.Get(c));
    }
  }
}

class RangeRecorder {
 public:
  std::string reported_pages;

  RangeRecorder()
      : page_size_scaled_log(
            Log2(GetPageSizeCached() >> Allocator64::kCompactPtrScale)),
        last_page_reported(0) {}

  void ReleasePageRangeToOS(u32 from, u32 to) {
    from >>= page_size_scaled_log;
    to >>= page_size_scaled_log;
    ASSERT_LT(from, to);
    if (!reported_pages.empty())
      ASSERT_LT(last_page_reported, from);
    reported_pages.append(from - last_page_reported, '.');
    reported_pages.append(to - from, 'x');
    last_page_reported = to;
  }
 private:
  const uptr page_size_scaled_log;
  u32 last_page_reported;
};
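
// Feeds page-by-page free/used patterns to FreePagesRangeTracker and checks
// that the coalesced ranges it reports (via RangeRecorder) match the input
// pattern with trailing kept pages stripped.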
TEST(SanitizerCommon, SizeClassAllocator64FreePagesRangeTracker) {
  typedef Allocator64::FreePagesRangeTracker<RangeRecorder> RangeTracker;

  // 'x' denotes a page to be released, '.' denotes a page to be kept around.
  const char* test_cases[] = {
      "",
      ".",
      "x",
      "........",
      "xxxxxxxxxxx",
      "..............xxxxx",
      "xxxxxxxxxxxxxxxxxx.....",
      "......xxxxxxxx........",
      "xxx..........xxxxxxxxxxxxxxx",
      "......xxxx....xxxx........",
      "xxx..........xxxxxxxx....xxxxxxx",
      "x.x.x.x.x.x.x.x.x.x.x.x.",
      ".x.x.x.x.x.x.x.x.x.x.x.x",
      ".x.x.x.x.x.x.x.x.x.x.x.x.",
      "x.x.x.x.x.x.x.x.x.x.x.x.x",
  };

  for (auto test_case : test_cases) {
    RangeRecorder range_recorder;
    RangeTracker tracker(&range_recorder);
    for (int i = 0; test_case[i] != 0; i++)
      tracker.NextPage(test_case[i] == 'x');
    tracker.Done();
    // Strip trailing '.'-pages before comparing the results as they are not
    // going to be reported to range_recorder anyway.
    const char* last_x = strrchr(test_case, 'x');
    std::string expected(
        test_case,
        last_x == nullptr ? 0 : (last_x - test_case + 1));
    EXPECT_STREQ(expected.c_str(), range_recorder.reported_pages.c_str());
  }
}

class ReleasedPagesTrackingMemoryMapper {
 public:
  std::set<u32> reported_pages;

  void *MapPackedCounterArrayBuffer(uptr buffer_size) {
    reported_pages.clear();
    return calloc(1, buffer_size);
  }
  void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {
    free(buffer);
  }

  void ReleasePageRangeToOS(u32 from, u32 to) {
    uptr page_size_scaled =
        GetPageSizeCached() >> Allocator64::kCompactPtrScale;
    for (u32 i = from; i < to; i += page_size_scaled)
      reported_pages.insert(i);
  }
};

template <class Allocator>
void TestReleaseFreeMemoryToOS() {
  ReleasedPagesTrackingMemoryMapper memory_mapper;
  const uptr kAllocatedPagesCount = 1024;
  const uptr page_size = GetPageSizeCached();
  const uptr page_size_scaled = page_size >> Allocator::kCompactPtrScale;
  std::mt19937 r;
  uint32_t rnd_state = 42;

  for (uptr class_id = 1; class_id <= Allocator::SizeClassMapT::kLargestClassID;
       class_id++) {
    const uptr chunk_size = Allocator::SizeClassMapT::Size(class_id);
    const uptr chunk_size_scaled = chunk_size >> Allocator::kCompactPtrScale;
    const uptr max_chunks =
        kAllocatedPagesCount * GetPageSizeCached() / chunk_size;

    // Generate the random free list.
    std::vector<u32> free_array;
    bool in_free_range = false;
    uptr current_range_end = 0;
    for (uptr i = 0; i < max_chunks; i++) {
      if (i == current_range_end) {
        in_free_range = (my_rand_r(&rnd_state) & 1U) == 1;
        current_range_end += my_rand_r(&rnd_state) % 100 + 1;
      }
      if (in_free_range)
        free_array.push_back(i * chunk_size_scaled);
    }
    if (free_array.empty())
      continue;
    // Shuffle free_array to verify that ReleaseFreeMemoryToOS does not depend
    // on the list ordering.
    std::shuffle(free_array.begin(), free_array.end(), r);

    Allocator::ReleaseFreeMemoryToOS(&free_array[0], free_array.size(),
                                     chunk_size, kAllocatedPagesCount,
                                     &memory_mapper);

    // Verify that no released page is touched by a used chunk, and that every
    // range of free chunks large enough to cover entire memory pages had
    // those pages released.
    uptr verified_released_pages = 0;
    std::set<u32> free_chunks(free_array.begin(), free_array.end());

    u32 current_chunk = 0;
    in_free_range = false;
    u32 current_free_range_start = 0;
    for (uptr i = 0; i <= max_chunks; i++) {
      bool is_free_chunk = free_chunks.find(current_chunk) != free_chunks.end();

      if (is_free_chunk) {
        if (!in_free_range) {
          in_free_range = true;
          current_free_range_start = current_chunk;
        }
      } else {
        // Verify that this used chunk does not touch any released page.
        for (uptr i_page = current_chunk / page_size_scaled;
             i_page <= (current_chunk + chunk_size_scaled - 1) /
                       page_size_scaled;
             i_page++) {
          bool page_released =
              memory_mapper.reported_pages.find(i_page * page_size_scaled) !=
              memory_mapper.reported_pages.end();
          ASSERT_EQ(false, page_released);
        }

        if (in_free_range) {
          in_free_range = false;
          // Verify that all entire memory pages covered by this range of free
          // chunks were released.
          u32 page = RoundUpTo(current_free_range_start, page_size_scaled);
          while (page + page_size_scaled <= current_chunk) {
            bool page_released =
                memory_mapper.reported_pages.find(page) !=
                memory_mapper.reported_pages.end();
            ASSERT_EQ(true, page_released);
            verified_released_pages++;
            page += page_size_scaled;
          }
        }
      }

      current_chunk += chunk_size_scaled;
    }

    ASSERT_EQ(memory_mapper.reported_pages.size(), verified_released_pages);
  }
}

TEST(SanitizerCommon, SizeClassAllocator64ReleaseFreeMemoryToOS) {
  TestReleaseFreeMemoryToOS<Allocator64>();
}

#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactReleaseFreeMemoryToOS) {
  TestReleaseFreeMemoryToOS<Allocator64Compact>();
}

TEST(SanitizerCommon, SizeClassAllocator64VeryCompactReleaseFreeMemoryToOS) {
  TestReleaseFreeMemoryToOS<Allocator64VeryCompact>();
}
#endif  // !SANITIZER_ANDROID

#endif  // SANITIZER_CAN_USE_ALLOCATOR64
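
// TwoLevelByteMap tests: the first checks sparse set()/get round-trips over
// the whole index range; the threaded test checks that concurrent writers see
// their own values and that exactly one map/unmap callback fires per
// second-level array (m.size1() of them).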
TEST(SanitizerCommon, TwoLevelByteMap) {
  const u64 kSize1 = 1 << 6, kSize2 = 1 << 12;
  const u64 n = kSize1 * kSize2;
  TwoLevelByteMap<kSize1, kSize2> m;
  m.Init();
  for (u64 i = 0; i < n; i += 7) {
    m.set(i, (i % 100) + 1);
  }
  for (u64 j = 0; j < n; j++) {
    if (j % 7)
      EXPECT_EQ(m[j], 0);
    else
      EXPECT_EQ(m[j], (j % 100) + 1);
  }

  m.TestOnlyUnmap();
}

template <typename AddressSpaceView>
using TestByteMapASVT =
    TwoLevelByteMap<1 << 12, 1 << 13, AddressSpaceView, TestMapUnmapCallback>;
using TestByteMap = TestByteMapASVT<LocalAddressSpaceView>;

struct TestByteMapParam {
  TestByteMap *m;
  size_t shard;
  size_t num_shards;
};

void *TwoLevelByteMapUserThread(void *param) {
  TestByteMapParam *p = (TestByteMapParam*)param;
  for (size_t i = p->shard; i < p->m->size(); i += p->num_shards) {
    size_t val = (i % 100) + 1;
    p->m->set(i, val);
    EXPECT_EQ((*p->m)[i], val);
  }
  return 0;
}

TEST(SanitizerCommon, ThreadedTwoLevelByteMap) {
  TestByteMap m;
  m.Init();
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  static const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  TestByteMapParam p[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    p[i].m = &m;
    p[i].shard = i;
    p[i].num_shards = kNumThreads;
    PTHREAD_CREATE(&t[i], 0, TwoLevelByteMapUserThread, &p[i]);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, 0UL);
  m.TestOnlyUnmap();
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, m.size1());
}

TEST(SanitizerCommon, LowLevelAllocatorShouldRoundUpSizeOnAlloc) {
  // When a requested block is slightly bigger than a memory page and
  // LowLevelAllocator calls MmapOrDie for the internal buffer, it should round
  // the mapping size up to the page size, so that subsequent calls to the
  // allocator can use the remaining space in the last allocated page.
  static LowLevelAllocator allocator;
  char *ptr1 = (char *)allocator.Allocate(GetPageSizeCached() + 16);
  char *ptr2 = (char *)allocator.Allocate(16);
  EXPECT_EQ(ptr2, ptr1 + GetPageSizeCached() + 16);
}

#endif  // #if !SANITIZER_DEBUG