//===-- sanitizer_allocator_test.cpp --------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"

#include "sanitizer_test_utils.h"
#include "sanitizer_pthread_wrappers.h"

#include "gtest/gtest.h"

#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <vector>
#include <random>
#include <set>

using namespace __sanitizer;

#if SANITIZER_SOLARIS && defined(__sparcv9)
// FIXME: These tests probably fail because Solaris/sparcv9 uses the full
// 64-bit address space. Needs more investigation.
#define SKIP_ON_SOLARIS_SPARCV9(x) DISABLED_##x
#else
#define SKIP_ON_SOLARIS_SPARCV9(x) x
#endif

// Too slow for debug build
#if !SANITIZER_DEBUG

#if SANITIZER_CAN_USE_ALLOCATOR64
#if SANITIZER_WINDOWS
// On Windows 64-bit there is no easy way to find a large enough fixed address
// space that is always available. Thus, a dynamically allocated address space
// is used instead (i.e. ~(uptr)0).
static const uptr kAllocatorSpace = ~(uptr)0;
static const uptr kAllocatorSize = 0x8000000000ULL;  // 512G.
static const u64 kAddressSpaceSize = 1ULL << 47;
typedef DefaultSizeClassMap SizeClassMap;
#elif SANITIZER_ANDROID && defined(__aarch64__)
static const uptr kAllocatorSpace = 0x3000000000ULL;
static const uptr kAllocatorSize = 0x2000000000ULL;
static const u64 kAddressSpaceSize = 1ULL << 39;
typedef VeryCompactSizeClassMap SizeClassMap;
#else
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;
typedef DefaultSizeClassMap SizeClassMap;
#endif

template <typename AddressSpaceViewTy>
struct AP64 {  // Allocator Params. Short name for shorter demangled names.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef ::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceViewTy>
struct AP64Dyn {
  static const uptr kSpaceBeg = ~(uptr)0;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef ::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceViewTy>
struct AP64Compact {
  static const uptr kSpaceBeg = ~(uptr)0;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef CompactSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceViewTy>
struct AP64VeryCompact {
  static const uptr kSpaceBeg = ~(uptr)0;
  static const uptr kSpaceSize = 1ULL << 37;
  static const uptr kMetadataSize = 16;
  typedef VeryCompactSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceViewTy>
struct AP64Dense {
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef DenseSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceView>
using Allocator64ASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
using Allocator64 = Allocator64ASVT<LocalAddressSpaceView>;

template <typename AddressSpaceView>
using Allocator64DynamicASVT = SizeClassAllocator64<AP64Dyn<AddressSpaceView>>;
using Allocator64Dynamic = Allocator64DynamicASVT<LocalAddressSpaceView>;

template <typename AddressSpaceView>
using Allocator64CompactASVT =
    SizeClassAllocator64<AP64Compact<AddressSpaceView>>;
using Allocator64Compact = Allocator64CompactASVT<LocalAddressSpaceView>;

template <typename AddressSpaceView>
using Allocator64VeryCompactASVT =
    SizeClassAllocator64<AP64VeryCompact<AddressSpaceView>>;
using Allocator64VeryCompact =
    Allocator64VeryCompactASVT<LocalAddressSpaceView>;

template <typename AddressSpaceView>
using Allocator64DenseASVT = SizeClassAllocator64<AP64Dense<AddressSpaceView>>;
using Allocator64Dense = Allocator64DenseASVT<LocalAddressSpaceView>;

#elif defined(__mips64)
static const u64 kAddressSpaceSize = 1ULL << 40;
#elif defined(__aarch64__)
static const u64 kAddressSpaceSize = 1ULL << 39;
#elif defined(__s390x__)
static const u64 kAddressSpaceSize = 1ULL << 53;
#elif defined(__s390__)
static const u64 kAddressSpaceSize = 1ULL << 31;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);

template <typename AddressSpaceViewTy>
struct AP32Compact {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = kAddressSpaceSize;
  static const uptr kMetadataSize = 16;
  typedef CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = ::kRegionSizeLog;
  using AddressSpaceView = AddressSpaceViewTy;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
template <typename AddressSpaceView>
using Allocator32CompactASVT =
    SizeClassAllocator32<AP32Compact<AddressSpaceView>>;
using Allocator32Compact = Allocator32CompactASVT<LocalAddressSpaceView>;

template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  SCMap::Print();
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

TEST(SanitizerCommon, VeryCompactSizeClassMap) {
  TestSizeClassMap<VeryCompactSizeClassMap>();
}

TEST(SanitizerCommon, InternalSizeClassMap) {
  TestSizeClassMap<InternalSizeClassMap>();
}

TEST(SanitizerCommon, DenseSizeClassMap) {
  TestSizeClassMap<DenseSizeClassMap>();
}

template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever);
  typename Allocator::AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {
    1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000
  };

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 4000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        x[0] = 0;
        x[size - 1] = 0;
        x[size / 2] = 0;
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
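  // Scan the whole address space in coarse steps: PointerIsMine() filters out
  // addresses the allocator does not own, and Ident() (from
  // sanitizer_test_utils.h) is presumably there to keep the GetBlockBegin()
  // call from being optimized away.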
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Dynamic) {
  TestSizeClassAllocator<Allocator64Dynamic>();
}

#if !SANITIZER_ANDROID
// FIXME(kostyak): find values so that those work on Android as well.
TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}

TEST(SanitizerCommon, SizeClassAllocator64Dense) {
  TestSizeClassAllocator<Allocator64Dense>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator64VeryCompact) {
  TestSizeClassAllocator<Allocator64VeryCompact>();
}
#endif
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}

template <typename AddressSpaceViewTy>
struct AP32SeparateBatches {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = kAddressSpaceSize;
  static const uptr kMetadataSize = 16;
  typedef DefaultSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = ::kRegionSizeLog;
  using AddressSpaceView = AddressSpaceViewTy;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
};
template <typename AddressSpaceView>
using Allocator32SeparateBatchesASVT =
    SizeClassAllocator32<AP32SeparateBatches<AddressSpaceView>>;
using Allocator32SeparateBatches =
    Allocator32SeparateBatchesASVT<LocalAddressSpaceView>;

TEST(SanitizerCommon, SizeClassAllocator32SeparateBatches) {
  TestSizeClassAllocator<Allocator32SeparateBatches>();
}

template <class Allocator>
void SizeClassAllocatorMetadataStress() {
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever);
  typename Allocator::AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 1 << 13;
  void *allocated[kNumAllocs];
  void *meta[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % (Allocator::kNumClasses - 1));
    allocated[i] = x;
    meta[i] = a->GetMetaData(x);
  }
  // Get Metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    uptr idx = i % kNumAllocs;
    void *m = a->GetMetaData(allocated[idx]);
    EXPECT_EQ(m, meta[idx]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % (Allocator::kNumClasses - 1), allocated[i]);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Dynamic>();
}

#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif

#endif
#endif  // SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}

template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize) {
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever);
  typename Allocator::AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  uptr max_size_class = Allocator::SizeClassMapT::kLargestClassID;
  uptr size = Allocator::SizeClassMapT::Size(max_size_class);
  // Make sure we correctly compute GetBlockBegin() w/o overflow.
  for (size_t i = 0; i <= TotalSize / size; i++) {
    void *x = cache.Allocate(a, max_size_class);
    void *beg = a->GetBlockBegin(x);
    // if ((i & (i - 1)) == 0)
    //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
    EXPECT_EQ(x, beg);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64>(
      1ULL << (SANITIZER_ANDROID ? 31 : 33));
}
TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
      1ULL << (SANITIZER_ANDROID ? 31 : 33));
}
#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
}
#endif
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactGetBlockBegin) {
  // Does not have > 4Gb for each class.
  SizeClassAllocatorGetBlockBeginStress<Allocator64VeryCompact>(1ULL << 31);
}
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>(1ULL << 33);
}
#endif
#endif  // SANITIZER_CAN_USE_ALLOCATOR64

struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS

template <typename AddressSpaceViewTy = LocalAddressSpaceView>
struct AP64WithCallback {
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef ::SizeClassMap SizeClassMap;
  typedef TestMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<AP64WithCallback<>> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init(kReleaseToOSIntervalNever);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  typename Allocator64WithCallBack::AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  const size_t kNumChunks = 128;
  uint32_t chunks[kNumChunks];
  a->GetFromAllocator(&stats, 30, chunks, kNumChunks);
  // State + alloc + metadata + freearray.
  EXPECT_EQ(TestMapUnmapCallback::map_count, 4);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif
#endif

template <typename AddressSpaceViewTy = LocalAddressSpaceView>
struct AP32WithCallback {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = kAddressSpaceSize;
  static const uptr kMetadataSize = 16;
  typedef CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = ::kRegionSizeLog;
  using AddressSpaceView = AddressSpaceViewTy;
  typedef TestMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};

TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<AP32WithCallback<>> Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init(kReleaseToOSIntervalNever);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
  Allocator32WithCallBack::AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}

TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}

// Don't test OOM conditions on Win64 because it causes other tests on the same
// machine to OOM.
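// The overflow test below keeps pulling 128-chunk batches from a single size
// class until that class's region is exhausted; at that point
// GetFromAllocator() is expected to report failure rather than crash.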
#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  Allocator64 a;
  a.Init(kReleaseToOSIntervalNever);
  Allocator64::AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();

  const size_t kNumChunks = 128;
  uint32_t chunks[kNumChunks];
  bool allocation_failed = false;
  for (int i = 0; i < 1000000; i++) {
    if (!a.GetFromAllocator(&stats, 52, chunks, kNumChunks)) {
      allocation_failed = true;
      break;
    }
  }
  EXPECT_EQ(allocation_failed, true);

  a.TestOnlyUnmap();
}
#endif

TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<NoOpMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  // Allocate some.
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  // Deallocate all.
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that none are left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test alignments. Test with 256MB alignment on x64 non-Windows machines.
  // Windows doesn't overcommit, and many machines do not have 25.6GB of swap.
  uptr max_alignment =
      (SANITIZER_WORDSIZE == 64 && !SANITIZER_WINDOWS) ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }

  // Regression test for boundary condition in GetBlockBegin().
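  // The last byte of a page_size-sized block must map back to its begin, while
  // the address just past the end (p + page_size) must not.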
  uptr page_size = GetPageSizeCached();
  char *p = (char *)a.Allocate(&stats, page_size, 1);
  CHECK_EQ(p, a.GetBlockBegin(p));
  CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
  CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
  a.Deallocate(&stats, p);
}

template <class PrimaryAllocator>
void TestCombinedAllocator() {
  typedef CombinedAllocator<PrimaryAllocator> Allocator;
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever);
  std::mt19937 r;

  typename Allocator::AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);

  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
      if ((i % 1024) == 0)
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    std::shuffle(allocated.begin(), allocated.end(), r);

    // Test ForEachChunk(...)
    {
      std::set<void *> reported_chunks;
      auto cb = [](uptr chunk, void *arg) {
        auto reported_chunks_ptr = reinterpret_cast<std::set<void *> *>(arg);
        auto pair =
            reported_chunks_ptr->insert(reinterpret_cast<void *>(chunk));
        // Check chunk is never reported more than once.
        ASSERT_TRUE(pair.second);
      };
      a->ForEachChunk(cb, reinterpret_cast<void *>(&reported_chunks));
      for (const auto &allocated_ptr : allocated) {
        ASSERT_NE(reported_chunks.find(allocated_ptr), reported_chunks.end());
      }
    }

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  a->TestOnlyUnmap();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64>();
}

TEST(SanitizerCommon, CombinedAllocator64Dynamic) {
  TestCombinedAllocator<Allocator64Dynamic>();
}

#if !SANITIZER_ANDROID
TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, CombinedAllocator64VeryCompact) {
  TestCombinedAllocator<Allocator64VeryCompact>();
}
#endif

TEST(SanitizerCommon, SKIP_ON_SOLARIS_SPARCV9(CombinedAllocator32Compact)) {
  TestCombinedAllocator<Allocator32Compact>();
}

template <class Allocator>
void TestSizeClassAllocatorLocalCache() {
  using AllocatorCache = typename Allocator::AllocatorCache;
  AllocatorCache cache;
  Allocator *a = new Allocator();

  a->Init(kReleaseToOSIntervalNever);
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 10000;
  const int kNumIter = 100;
  uptr saved_total = 0;
  for (int class_id = 1; class_id <= 5; class_id++) {
    for (int it = 0; it < kNumIter; it++) {
      void *allocated[kNumAllocs];
      for (uptr i = 0; i < kNumAllocs; i++) {
        allocated[i] = cache.Allocate(a, class_id);
      }
      for (uptr i = 0; i < kNumAllocs; i++) {
        cache.Deallocate(a, class_id, allocated[i]);
      }
      cache.Drain(a);
      uptr total_allocated = a->TotalMemoryUsed();
      if (it)
        CHECK_EQ(saved_total, total_allocated);
      saved_total = total_allocated;
    }
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) {
  TestSizeClassAllocatorLocalCache<Allocator64Dynamic>();
}

#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<Allocator64Compact>();
}
#endif
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactLocalCache) {
  TestSizeClassAllocatorLocalCache<Allocator64VeryCompact>();
}
#endif
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<Allocator32Compact>();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
typedef Allocator64::AllocatorCache AllocatorCache;
static AllocatorCache static_allocator_cache;

void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}

TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init(kReleaseToOSIntervalNever);
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a);
    PTHREAD_JOIN(t, 0);
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}

// Struct which is allocated to pass info to new threads. The new thread frees
// it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread. Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(params->allocator, params->class_id, params);
  return NULL;
}

// The allocator cache is supposed to be POD and zero initialized. We should be
// able to call Deallocate on a zeroed cache, and it will self-initialize.
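// In the test below, child_cache is only memset to zero and never has Init()
// called on it before the child thread deallocates through it.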
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init(kReleaseToOSIntervalNever);
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params);
  PTHREAD_JOIN(t, 0);

  allocator.TestOnlyUnmap();
}
#endif

TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  InternalFree(p);
  InternalFree(p2);
}

TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  unsigned rnd = 42;
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}

TEST(Allocator, LargeAlloc) {
  void *p = InternalAlloc(10 << 20);
  InternalFree(p);
}

TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalMmapVector<int> int_buf(kSize);
    EXPECT_EQ((uptr)kSize, int_buf.size());
  }
  InternalMmapVector<char> char_buf(kSize);
  EXPECT_EQ((uptr)kSize, char_buf.size());
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}

void IterationTestCallback(uptr chunk, void *arg) {
  reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
}

template <class Allocator>
void TestSizeClassAllocatorIteration() {
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever);
  typename Allocator::AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  // Allocate a bunch of chunks.
  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
    uptr size = sizes[s];
    if (!a->CanAllocate(size, 1)) continue;
    // printf("s = %ld\n", size);
    uptr n_iter = std::max((uptr)6, 80000 / size);
    // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
    for (uptr j = 0; j < n_iter; j++) {
      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
      void *x = cache.Allocate(a, class_id0);
      allocated.push_back(x);
    }
  }

  std::set<uptr> reported_chunks;
  a->ForceLock();
  a->ForEachChunk(IterationTestCallback, &reported_chunks);
  a->ForceUnlock();

  for (uptr i = 0; i < allocated.size(); i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
  TestSizeClassAllocatorIteration<Allocator64>();
}
TEST(SanitizerCommon, SizeClassAllocator64DynamicIteration) {
  TestSizeClassAllocatorIteration<Allocator64Dynamic>();
}
#endif
#endif

TEST(SanitizerCommon, SKIP_ON_SOLARIS_SPARCV9(SizeClassAllocator32Iteration)) {
  TestSizeClassAllocatorIteration<Allocator32Compact>();
}

TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
  LargeMmapAllocator<NoOpMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 40;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++)
    allocated[i] = (char *)a.Allocate(&stats, size, 1);

  std::set<uptr> reported_chunks;
  a.ForceLock();
  a.ForEachChunk(IterationTestCallback, &reported_chunks);
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }
  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}

TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
  LargeMmapAllocator<NoOpMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1024;
  static const uptr kNumExpectedFalseLookups = 10000000;
  char *allocated[kNumAllocs];
  static const uptr size = 4096;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
  }

  a.ForceLock();
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
    char *p1 = allocated[i % kNumAllocs];
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
  }

  for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
    void *p = reinterpret_cast<void *>(i % 1024);
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
    p = reinterpret_cast<void *>(~0L - (i % 1024));
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
  }
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}


// Don't test OOM conditions on Win64 because it causes other tests on the same
// machine to OOM.
#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
typedef __sanitizer::SizeClassMap<3, 4, 8, 38, 128, 16> SpecialSizeClassMap;
template <typename AddressSpaceViewTy = LocalAddressSpaceView>
struct AP64_SpecialSizeClassMap {
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 0;
  typedef SpecialSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
  // In a world where regions are small and chunks are huge...
  typedef SizeClassAllocator64<AP64_SpecialSizeClassMap<>> SpecialAllocator64;
  const uptr kRegionSize =
      kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
  SpecialAllocator64 *a = new SpecialAllocator64;
  a->Init(kReleaseToOSIntervalNever);
  SpecialAllocator64::AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  // ...one man is on a mission to overflow a region with a series of
  // successive allocations.

  const uptr kClassID = 107;
  const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
  ASSERT_LT(2 * kAllocationSize, kRegionSize);
  ASSERT_GT(3 * kAllocationSize, kRegionSize);
  EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
  EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
  EXPECT_EQ(cache.Allocate(a, kClassID), nullptr);

  const uptr Class2 = 100;
  const uptr Size2 = SpecialSizeClassMap::Size(Class2);
  ASSERT_EQ(Size2 * 8, kRegionSize);
  char *p[7];
  for (int i = 0; i < 7; i++) {
    p[i] = (char*)cache.Allocate(a, Class2);
    EXPECT_NE(p[i], nullptr);
    fprintf(stderr, "p[%d] %p s = %lx\n", i, (void*)p[i], Size2);
    p[i][Size2 - 1] = 42;
    if (i) ASSERT_LT(p[i - 1], p[i]);
  }
  EXPECT_EQ(cache.Allocate(a, Class2), nullptr);
  cache.Deallocate(a, Class2, p[0]);
  cache.Drain(a);
  ASSERT_EQ(p[6][Size2 - 1], 42);
  a->TestOnlyUnmap();
  delete a;
}

#endif

#if SANITIZER_CAN_USE_ALLOCATOR64

class NoMemoryMapper {
 public:
  uptr last_request_buffer_size;

  NoMemoryMapper() : last_request_buffer_size(0) {}

  void *MapPackedCounterArrayBuffer(uptr buffer_size) {
    last_request_buffer_size = buffer_size;
    return nullptr;
  }
  void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {}
};

class RedZoneMemoryMapper {
 public:
  RedZoneMemoryMapper() {
    const auto page_size = GetPageSize();
    buffer = MmapOrDie(3ULL * page_size, "");
    MprotectNoAccess(reinterpret_cast<uptr>(buffer), page_size);
    MprotectNoAccess(reinterpret_cast<uptr>(buffer) + page_size * 2, page_size);
  }
  ~RedZoneMemoryMapper() {
    UnmapOrDie(buffer, 3 * GetPageSize());
  }

  void *MapPackedCounterArrayBuffer(uptr buffer_size) {
    const auto page_size = GetPageSize();
    CHECK_EQ(buffer_size, page_size);
    void *p =
        reinterpret_cast<void *>(reinterpret_cast<uptr>(buffer) + page_size);
    memset(p, 0, page_size);
    return p;
  }
  void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {}

 private:
  void *buffer;
};

TEST(SanitizerCommon, SizeClassAllocator64PackedCounterArray) {
  NoMemoryMapper no_memory_mapper;
  typedef Allocator64::PackedCounterArray<NoMemoryMapper>
      NoMemoryPackedCounterArray;

  for (int i = 0; i < 64; i++) {
    // Various valid counter's max values packed into one word.
    NoMemoryPackedCounterArray counters_2n(1, 1ULL << i, &no_memory_mapper);
    EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);

    // Check the "all bit set" values too.
    NoMemoryPackedCounterArray counters_2n1_1(1, ~0ULL >> i, &no_memory_mapper);
    EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);

    // Verify the packing ratio, the counter is expected to be packed into the
    // closest power of 2 bits.
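    // E.g. a max value of 2^i needs i + 1 bits, rounded up to the next power
    // of two, so 64 counters are expected to request
    // 8 * RoundUpToPowerOfTwo(i + 1) bytes.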
    NoMemoryPackedCounterArray counters(64, 1ULL << i, &no_memory_mapper);
    EXPECT_EQ(8ULL * RoundUpToPowerOfTwo(i + 1),
              no_memory_mapper.last_request_buffer_size);
  }

  RedZoneMemoryMapper memory_mapper;
  typedef Allocator64::PackedCounterArray<RedZoneMemoryMapper>
      RedZonePackedCounterArray;
  // Go through 1, 2, 4, 8, .. 64 bits per counter.
  for (int i = 0; i < 7; i++) {
    // Make sure counters request one memory page for the buffer.
    const u64 kNumCounters = (GetPageSize() / 8) * (64 >> i);
    RedZonePackedCounterArray counters(kNumCounters,
                                       1ULL << ((1 << i) - 1),
                                       &memory_mapper);
    counters.Inc(0);
    for (u64 c = 1; c < kNumCounters - 1; c++) {
      ASSERT_EQ(0ULL, counters.Get(c));
      counters.Inc(c);
      ASSERT_EQ(1ULL, counters.Get(c - 1));
    }
    ASSERT_EQ(0ULL, counters.Get(kNumCounters - 1));
    counters.Inc(kNumCounters - 1);

    if (i > 0) {
      counters.IncRange(0, kNumCounters - 1);
      for (u64 c = 0; c < kNumCounters; c++)
        ASSERT_EQ(2ULL, counters.Get(c));
    }
  }
}

class RangeRecorder {
 public:
  std::string reported_pages;

  RangeRecorder()
      : page_size_scaled_log(
            Log2(GetPageSizeCached() >> Allocator64::kCompactPtrScale)),
        last_page_reported(0) {}

  void ReleasePageRangeToOS(u32 from, u32 to) {
    from >>= page_size_scaled_log;
    to >>= page_size_scaled_log;
    ASSERT_LT(from, to);
    if (!reported_pages.empty())
      ASSERT_LT(last_page_reported, from);
    reported_pages.append(from - last_page_reported, '.');
    reported_pages.append(to - from, 'x');
    last_page_reported = to;
  }
 private:
  const uptr page_size_scaled_log;
  u32 last_page_reported;
};

TEST(SanitizerCommon, SizeClassAllocator64FreePagesRangeTracker) {
  typedef Allocator64::FreePagesRangeTracker<RangeRecorder> RangeTracker;

  // 'x' denotes a page to be released, '.' denotes a page to be kept around.
  const char* test_cases[] = {
      "",
      ".",
      "x",
      "........",
      "xxxxxxxxxxx",
      "..............xxxxx",
      "xxxxxxxxxxxxxxxxxx.....",
      "......xxxxxxxx........",
      "xxx..........xxxxxxxxxxxxxxx",
      "......xxxx....xxxx........",
      "xxx..........xxxxxxxx....xxxxxxx",
      "x.x.x.x.x.x.x.x.x.x.x.x.",
      ".x.x.x.x.x.x.x.x.x.x.x.x",
      ".x.x.x.x.x.x.x.x.x.x.x.x.",
      "x.x.x.x.x.x.x.x.x.x.x.x.x",
  };

  for (auto test_case : test_cases) {
    RangeRecorder range_recorder;
    RangeTracker tracker(&range_recorder);
    for (int i = 0; test_case[i] != 0; i++)
      tracker.NextPage(test_case[i] == 'x');
    tracker.Done();
    // Strip trailing '.'-pages before comparing the results as they are not
    // going to be reported to range_recorder anyway.
    const char* last_x = strrchr(test_case, 'x');
    std::string expected(
        test_case,
        last_x == nullptr ? 0 : (last_x - test_case + 1));
    EXPECT_STREQ(expected.c_str(), range_recorder.reported_pages.c_str());
  }
}

class ReleasedPagesTrackingMemoryMapper {
 public:
  std::set<u32> reported_pages;

  void *MapPackedCounterArrayBuffer(uptr buffer_size) {
    reported_pages.clear();
    return calloc(1, buffer_size);
  }
  void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {
    free(buffer);
  }

  void ReleasePageRangeToOS(u32 from, u32 to) {
    uptr page_size_scaled =
        GetPageSizeCached() >> Allocator64::kCompactPtrScale;
    for (u32 i = from; i < to; i += page_size_scaled)
      reported_pages.insert(i);
  }
};

template <class Allocator>
void TestReleaseFreeMemoryToOS() {
  ReleasedPagesTrackingMemoryMapper memory_mapper;
  const uptr kAllocatedPagesCount = 1024;
  const uptr page_size = GetPageSizeCached();
  const uptr page_size_scaled = page_size >> Allocator::kCompactPtrScale;
  std::mt19937 r;
  uint32_t rnd_state = 42;

  for (uptr class_id = 1; class_id <= Allocator::SizeClassMapT::kLargestClassID;
       class_id++) {
    const uptr chunk_size = Allocator::SizeClassMapT::Size(class_id);
    const uptr chunk_size_scaled = chunk_size >> Allocator::kCompactPtrScale;
    const uptr max_chunks =
        kAllocatedPagesCount * GetPageSizeCached() / chunk_size;

    // Generate the random free list.
    std::vector<u32> free_array;
    bool in_free_range = false;
    uptr current_range_end = 0;
    for (uptr i = 0; i < max_chunks; i++) {
      if (i == current_range_end) {
        in_free_range = (my_rand_r(&rnd_state) & 1U) == 1;
        current_range_end += my_rand_r(&rnd_state) % 100 + 1;
      }
      if (in_free_range)
        free_array.push_back(i * chunk_size_scaled);
    }
    if (free_array.empty())
      continue;
    // Shuffle free_list to verify that ReleaseFreeMemoryToOS does not depend on
    // the list ordering.
    std::shuffle(free_array.begin(), free_array.end(), r);

    Allocator::ReleaseFreeMemoryToOS(&free_array[0], free_array.size(),
                                     chunk_size, kAllocatedPagesCount,
                                     &memory_mapper);

    // Verify that there are no released pages touched by used chunks and all
    // ranges of free chunks big enough to contain the entire memory pages had
    // these pages released.
    uptr verified_released_pages = 0;
    std::set<u32> free_chunks(free_array.begin(), free_array.end());

    u32 current_chunk = 0;
    in_free_range = false;
    u32 current_free_range_start = 0;
    for (uptr i = 0; i <= max_chunks; i++) {
      bool is_free_chunk = free_chunks.find(current_chunk) != free_chunks.end();

      if (is_free_chunk) {
        if (!in_free_range) {
          in_free_range = true;
          current_free_range_start = current_chunk;
        }
      } else {
        // Verify that this used chunk does not touch any released page.
        for (uptr i_page = current_chunk / page_size_scaled;
             i_page <= (current_chunk + chunk_size_scaled - 1) /
                       page_size_scaled;
             i_page++) {
          bool page_released =
              memory_mapper.reported_pages.find(i_page * page_size_scaled) !=
              memory_mapper.reported_pages.end();
          ASSERT_EQ(false, page_released);
        }

        if (in_free_range) {
          in_free_range = false;
          // Verify that all entire memory pages covered by this range of free
          // chunks were released.
          u32 page = RoundUpTo(current_free_range_start, page_size_scaled);
          while (page + page_size_scaled <= current_chunk) {
            bool page_released =
                memory_mapper.reported_pages.find(page) !=
                memory_mapper.reported_pages.end();
            ASSERT_EQ(true, page_released);
            verified_released_pages++;
            page += page_size_scaled;
          }
        }
      }

      current_chunk += chunk_size_scaled;
    }

    ASSERT_EQ(memory_mapper.reported_pages.size(), verified_released_pages);
  }
}

TEST(SanitizerCommon, SizeClassAllocator64ReleaseFreeMemoryToOS) {
  TestReleaseFreeMemoryToOS<Allocator64>();
}

#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactReleaseFreeMemoryToOS) {
  TestReleaseFreeMemoryToOS<Allocator64Compact>();
}

TEST(SanitizerCommon, SizeClassAllocator64VeryCompactReleaseFreeMemoryToOS) {
  TestReleaseFreeMemoryToOS<Allocator64VeryCompact>();
}
#endif  // !SANITIZER_ANDROID

#endif  // SANITIZER_CAN_USE_ALLOCATOR64

TEST(SanitizerCommon, TwoLevelByteMap) {
  const u64 kSize1 = 1 << 6, kSize2 = 1 << 12;
  const u64 n = kSize1 * kSize2;
  TwoLevelByteMap<kSize1, kSize2> m;
  m.Init();
  for (u64 i = 0; i < n; i += 7) {
    m.set(i, (i % 100) + 1);
  }
  for (u64 j = 0; j < n; j++) {
    if (j % 7)
      EXPECT_EQ(m[j], 0);
    else
      EXPECT_EQ(m[j], (j % 100) + 1);
  }

  m.TestOnlyUnmap();
}

template <typename AddressSpaceView>
using TestByteMapASVT =
    TwoLevelByteMap<1 << 12, 1 << 13, AddressSpaceView, TestMapUnmapCallback>;
using TestByteMap = TestByteMapASVT<LocalAddressSpaceView>;

struct TestByteMapParam {
  TestByteMap *m;
  size_t shard;
  size_t num_shards;
};

void *TwoLevelByteMapUserThread(void *param) {
  TestByteMapParam *p = (TestByteMapParam*)param;
  for (size_t i = p->shard; i < p->m->size(); i += p->num_shards) {
    size_t val = (i % 100) + 1;
    p->m->set(i, val);
    EXPECT_EQ((*p->m)[i], val);
  }
  return 0;
}

TEST(SanitizerCommon, ThreadedTwoLevelByteMap) {
  TestByteMap m;
  m.Init();
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  static const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  TestByteMapParam p[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    p[i].m = &m;
    p[i].shard = i;
    p[i].num_shards = kNumThreads;
    PTHREAD_CREATE(&t[i], 0, TwoLevelByteMapUserThread, &p[i]);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, 0UL);
  m.TestOnlyUnmap();
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, m.size1());
}

TEST(SanitizerCommon, LowLevelAllocatorShouldRoundUpSizeOnAlloc) {
  // When allocating a memory block slightly bigger than a memory page and
  // LowLevelAllocator calls MmapOrDie for the internal buffer, it should round
  // the size up to the page size, so that subsequent calls to the allocator
  // can use the remaining space in the last allocated page.
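  // Concretely: the page_size + 16 request below presumably maps two pages,
  // and the following 16-byte request is expected to land immediately after it
  // (ptr2 == ptr1 + page_size + 16), i.e. in the unused tail of that mapping
  // rather than in a fresh one.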
  static LowLevelAllocator allocator;
  char *ptr1 = (char *)allocator.Allocate(GetPageSizeCached() + 16);
  char *ptr2 = (char *)allocator.Allocate(16);
  EXPECT_EQ(ptr2, ptr1 + GetPageSizeCached() + 16);
}

#endif  // #if !SANITIZER_DEBUG