//===-- primary_test.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "primary32.h"
#include "primary64.h"
#include "size_class_map.h"

#include <condition_variable>
#include <memory>
#include <mutex>
#include <stdlib.h>
#include <string.h>
#include <thread>
#include <vector>

// Note that with small enough regions, the SizeClassAllocator64 also works on
// 32-bit architectures. It's not something we want to encourage, but we still
// should ensure the tests pass.

// Allocates and deallocates a fixed number of blocks for each power-of-two
// size the primary can serve, then releases the freed memory to the OS and
// prints the allocator statistics.
template <typename Primary> static void testPrimary() {
  const scudo::uptr NumberOfAllocations = 32U;
  auto Deleter = [](Primary *P) {
    P->unmapTestOnly();
    delete P;
  };
  std::unique_ptr<Primary, decltype(Deleter)> Allocator(new Primary, Deleter);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  for (scudo::uptr I = 0; I <= 16U; I++) {
    const scudo::uptr Size = 1UL << I;
    if (!Primary::canAllocate(Size))
      continue;
    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
    void *Pointers[NumberOfAllocations];
    for (scudo::uptr J = 0; J < NumberOfAllocations; J++) {
      void *P = Cache.allocate(ClassId);
      memset(P, 'B', Size);
      Pointers[J] = P;
    }
    for (scudo::uptr J = 0; J < NumberOfAllocations; J++)
      Cache.deallocate(ClassId, Pointers[J]);
  }
  Cache.destroy(nullptr);
  Allocator->releaseToOS();
  scudo::ScopedString Str(1024);
  Allocator->getStats(&Str);
  Str.output();
}

TEST(ScudoPrimaryTest, BasicPrimary) {
  using SizeClassMap = scudo::DefaultSizeClassMap;
#if !SCUDO_FUCHSIA
  testPrimary<scudo::SizeClassAllocator32<SizeClassMap, 18U>>();
#endif
  testPrimary<scudo::SizeClassAllocator64<SizeClassMap, 24U>>();
}

// The 64-bit SizeClassAllocator can be easily OOM'd with small region sizes.
// For the 32-bit one, it requires actually exhausting memory, so we skip it.
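// The test below pops TransferBatches for the largest size class until
// popBatch() fails, pushes everything back, and expects that an allocation
// failure was observed along the way.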
TEST(ScudoPrimaryTest, Primary64OOM) {
  using Primary = scudo::SizeClassAllocator64<scudo::DefaultSizeClassMap, 20U>;
  using TransferBatch = Primary::CacheT::TransferBatch;
  Primary Allocator;
  Allocator.init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  scudo::GlobalStats Stats;
  Stats.init();
  Cache.init(&Stats, &Allocator);
  bool AllocationFailed = false;
  std::vector<TransferBatch *> Batches;
  const scudo::uptr ClassId = Primary::SizeClassMap::LargestClassId;
  const scudo::uptr Size = Primary::getSizeByClassId(ClassId);
  for (scudo::uptr I = 0; I < 10000U; I++) {
    TransferBatch *B = Allocator.popBatch(&Cache, ClassId);
    if (!B) {
      AllocationFailed = true;
      break;
    }
    for (scudo::u32 J = 0; J < B->getCount(); J++)
      memset(B->get(J), 'B', Size);
    Batches.push_back(B);
  }
  while (!Batches.empty()) {
    Allocator.pushBatch(ClassId, Batches.back());
    Batches.pop_back();
  }
  Cache.destroy(nullptr);
  Allocator.releaseToOS();
  scudo::ScopedString Str(1024);
  Allocator.getStats(&Str);
  Str.output();
  EXPECT_EQ(AllocationFailed, true);
  Allocator.unmapTestOnly();
}

// Allocates blocks of random size classes, then verifies that
// iterateOverBlocks() finds each of the allocated blocks.
template <typename Primary> static void testIteratePrimary() {
  auto Deleter = [](Primary *P) {
    P->unmapTestOnly();
    delete P;
  };
  std::unique_ptr<Primary, decltype(Deleter)> Allocator(new Primary, Deleter);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  std::vector<std::pair<scudo::uptr, void *>> V;
  for (scudo::uptr I = 0; I < 64U; I++) {
    const scudo::uptr Size = std::rand() % Primary::SizeClassMap::MaxSize;
    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
    void *P = Cache.allocate(ClassId);
    V.push_back(std::make_pair(ClassId, P));
  }
  scudo::uptr Found = 0;
  auto Lambda = [V, &Found](scudo::uptr Block) {
    for (const auto &Pair : V) {
      if (Pair.second == reinterpret_cast<void *>(Block))
        Found++;
    }
  };
  Allocator->disable();
  Allocator->iterateOverBlocks(Lambda);
  Allocator->enable();
  EXPECT_EQ(Found, V.size());
  while (!V.empty()) {
    auto Pair = V.back();
    Cache.deallocate(Pair.first, Pair.second);
    V.pop_back();
  }
  Cache.destroy(nullptr);
  Allocator->releaseToOS();
  scudo::ScopedString Str(1024);
  Allocator->getStats(&Str);
  Str.output();
}

TEST(ScudoPrimaryTest, PrimaryIterate) {
  using SizeClassMap = scudo::DefaultSizeClassMap;
#if !SCUDO_FUCHSIA
  testIteratePrimary<scudo::SizeClassAllocator32<SizeClassMap, 18U>>();
#endif
  testIteratePrimary<scudo::SizeClassAllocator64<SizeClassMap, 24U>>();
}

static std::mutex Mutex;
static std::condition_variable Cv;
static bool Ready = false;

// Worker body for the threaded test: waits until the main thread signals
// Ready, then performs a burst of allocations and deallocations through a
// thread-local cache.
template <typename Primary> static void performAllocations(Primary *Allocator) {
  static THREADLOCAL typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator);
  std::vector<std::pair<scudo::uptr, void *>> V;
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    while (!Ready)
      Cv.wait(Lock);
  }
  for (scudo::uptr I = 0; I < 256U; I++) {
    const scudo::uptr Size = std::rand() % Primary::SizeClassMap::MaxSize / 4;
    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
    void *P = Cache.allocate(ClassId);
    if (P)
      V.push_back(std::make_pair(ClassId, P));
  }
  while (!V.empty()) {
    auto Pair = V.back();
    Cache.deallocate(Pair.first, Pair.second);
    V.pop_back();
  }
  Cache.destroy(nullptr);
}

// Spawns a number of threads that allocate and deallocate concurrently from
// the same primary allocator, exercising the locking of its shared structures.
template <typename Primary> static void testPrimaryThreaded() {
  auto Deleter = [](Primary *P) {
    P->unmapTestOnly();
    delete P;
  };
  std::unique_ptr<Primary, decltype(Deleter)> Allocator(new Primary, Deleter);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(performAllocations<Primary>, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS();
  scudo::ScopedString Str(1024);
  Allocator->getStats(&Str);
  Str.output();
}

TEST(ScudoPrimaryTest, PrimaryThreaded) {
  using SizeClassMap = scudo::SvelteSizeClassMap;
#if !SCUDO_FUCHSIA
  testPrimaryThreaded<scudo::SizeClassAllocator32<SizeClassMap, 18U>>();
#endif
  testPrimaryThreaded<scudo::SizeClassAllocator64<SizeClassMap, 24U>>();
}

// Through a simple allocation that spans two pages, verify that releaseToOS
// actually releases some bytes (at least one page worth). This is a regression
// test for an error in how the release criteria were computed.
template <typename Primary> static void testReleaseToOS() {
  auto Deleter = [](Primary *P) {
    P->unmapTestOnly();
    delete P;
  };
  std::unique_ptr<Primary, decltype(Deleter)> Allocator(new Primary, Deleter);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  const scudo::uptr Size = scudo::getPageSizeCached() * 2;
  EXPECT_TRUE(Primary::canAllocate(Size));
  const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
  void *P = Cache.allocate(ClassId);
  EXPECT_NE(P, nullptr);
  Cache.deallocate(ClassId, P);
  Cache.destroy(nullptr);
  EXPECT_GT(Allocator->releaseToOS(), 0U);
}

TEST(ScudoPrimaryTest, ReleaseToOS) {
  using SizeClassMap = scudo::DefaultSizeClassMap;
#if !SCUDO_FUCHSIA
  testReleaseToOS<scudo::SizeClassAllocator32<SizeClassMap, 18U>>();
#endif
  testReleaseToOS<scudo::SizeClassAllocator64<SizeClassMap, 24U>>();
}