//===-- primary_test.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "primary32.h"
#include "primary64.h"
#include "size_class_map.h"

#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <random>
#include <stdlib.h>
#include <thread>
#include <vector>

// Note that with small enough regions, the SizeClassAllocator64 also works on
// 32-bit architectures. It's not something we want to encourage, but we still
// should ensure the tests pass.

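// Each TestConfig below provides the compile-time parameters the primaries
// consume: region and group sizes (as log2), the allowed bounds for the
// release-to-OS interval, whether memory tagging may be supported, how block
// pointers are compacted, and how the region mapping grows. The typed tests
// further down instantiate the allocator once per config.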
struct TestConfig1 {
  static const scudo::uptr PrimaryRegionSizeLog = 18U;
  static const scudo::uptr PrimaryGroupSizeLog = 18U;
  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
  static const bool MaySupportMemoryTagging = false;
  typedef scudo::uptr PrimaryCompactPtrT;
  static const scudo::uptr PrimaryCompactPtrScale = 0;
  static const bool PrimaryEnableRandomOffset = true;
  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;
};

struct TestConfig2 {
#if defined(__mips__)
  // Unable to allocate greater size on QEMU-user.
  static const scudo::uptr PrimaryRegionSizeLog = 23U;
#else
  static const scudo::uptr PrimaryRegionSizeLog = 24U;
#endif
  static const scudo::uptr PrimaryGroupSizeLog = 20U;
  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
  static const bool MaySupportMemoryTagging = false;
  typedef scudo::uptr PrimaryCompactPtrT;
  static const scudo::uptr PrimaryCompactPtrScale = 0;
  static const bool PrimaryEnableRandomOffset = true;
  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;
};

struct TestConfig3 {
#if defined(__mips__)
  // Unable to allocate greater size on QEMU-user.
  static const scudo::uptr PrimaryRegionSizeLog = 23U;
#else
  static const scudo::uptr PrimaryRegionSizeLog = 24U;
#endif
  static const scudo::uptr PrimaryGroupSizeLog = 20U;
  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
  static const bool MaySupportMemoryTagging = true;
  typedef scudo::uptr PrimaryCompactPtrT;
  static const scudo::uptr PrimaryCompactPtrScale = 0;
  static const bool PrimaryEnableRandomOffset = true;
  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;
};

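// TestConfig4 additionally exercises 32-bit compact pointers with a non-zero
// scale (block addresses stored right-shifted by PrimaryCompactPtrScale),
// together with memory tagging support.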
struct TestConfig4 {
#if defined(__mips__)
  // Unable to allocate greater size on QEMU-user.
  static const scudo::uptr PrimaryRegionSizeLog = 23U;
#else
  static const scudo::uptr PrimaryRegionSizeLog = 24U;
#endif
  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
  static const bool MaySupportMemoryTagging = true;
  static const scudo::uptr PrimaryCompactPtrScale = 3U;
  static const scudo::uptr PrimaryGroupSizeLog = 20U;
  typedef scudo::u32 PrimaryCompactPtrT;
  static const bool PrimaryEnableRandomOffset = true;
  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;
};

template <typename BaseConfig, typename SizeClassMapT>
struct Config : public BaseConfig {
  using SizeClassMap = SizeClassMapT;
};

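// Map each base config onto a primary: the default wrapper uses the 64-bit
// primary, while TestConfig1 is specialized to use the 32-bit primary.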
template <typename BaseConfig, typename SizeClassMapT>
struct SizeClassAllocator
    : public scudo::SizeClassAllocator64<Config<BaseConfig, SizeClassMapT>> {};
template <typename SizeClassMapT>
struct SizeClassAllocator<TestConfig1, SizeClassMapT>
    : public scudo::SizeClassAllocator32<Config<TestConfig1, SizeClassMapT>> {};

template <typename BaseConfig, typename SizeClassMapT>
struct TestAllocator : public SizeClassAllocator<BaseConfig, SizeClassMapT> {
  ~TestAllocator() { this->unmapTestOnly(); }

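  // The tests heap-allocate the allocator; go through posix_memalign so that
  // the object gets the alignment its type declares, which may be stricter
  // than what a plain operator new would provide.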
  void *operator new(size_t size) {
    void *p = nullptr;
    EXPECT_EQ(0, posix_memalign(&p, alignof(TestAllocator), size));
    return p;
  }

  void operator delete(void *ptr) { free(ptr); }
};

template <class BaseConfig> struct ScudoPrimaryTest : public Test {};

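// A hand-rolled replacement for gtest's typed tests: SCUDO_TYPED_TEST defines
// the test body once as Run() on a per-test fixture template, and
// SCUDO_TYPED_TEST_ALL_TYPES stamps out one TEST_F per config (on Fuchsia only
// TestConfig2 and TestConfig3 are exercised). As a rough sketch, for each TYPE,
// SCUDO_TYPED_TEST(ScudoPrimaryTest, BasicPrimary) expands to:
//   using ScudoPrimaryTestBasicPrimary_TYPE = ScudoPrimaryTestBasicPrimary<TYPE>;
//   TEST_F(ScudoPrimaryTestBasicPrimary_TYPE, BasicPrimary) {
//     ScudoPrimaryTestBasicPrimary<TYPE>::Run();
//   }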
#if SCUDO_FUCHSIA
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig2)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig3)
#else
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig1)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig2)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig3)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig4)
#endif

#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                             \
  using FIXTURE##NAME##_##TYPE = FIXTURE##NAME<TYPE>;                          \
  TEST_F(FIXTURE##NAME##_##TYPE, NAME) { FIXTURE##NAME<TYPE>::Run(); }

#define SCUDO_TYPED_TEST(FIXTURE, NAME)                                        \
  template <class TypeParam>                                                   \
  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
    void Run();                                                                \
  };                                                                           \
  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
  template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()

SCUDO_TYPED_TEST(ScudoPrimaryTest, BasicPrimary) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  const scudo::uptr NumberOfAllocations = 32U;
  for (scudo::uptr I = 0; I <= 16U; I++) {
    const scudo::uptr Size = 1UL << I;
    if (!Primary::canAllocate(Size))
      continue;
    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
    void *Pointers[NumberOfAllocations];
    for (scudo::uptr J = 0; J < NumberOfAllocations; J++) {
      void *P = Cache.allocate(ClassId);
      memset(P, 'B', Size);
      Pointers[J] = P;
    }
    for (scudo::uptr J = 0; J < NumberOfAllocations; J++)
      Cache.deallocate(ClassId, Pointers[J]);
  }
  Cache.destroy(nullptr);
  Allocator->releaseToOS();
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}

struct SmallRegionsConfig {
  using SizeClassMap = scudo::DefaultSizeClassMap;
  static const scudo::uptr PrimaryRegionSizeLog = 21U;
  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
  static const bool MaySupportMemoryTagging = false;
  typedef scudo::uptr PrimaryCompactPtrT;
  static const scudo::uptr PrimaryCompactPtrScale = 0;
  static const bool PrimaryEnableRandomOffset = true;
  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;
  static const scudo::uptr PrimaryGroupSizeLog = 20U;
};

// The 64-bit SizeClassAllocator can be easily OOM'd with small region sizes.
// For the 32-bit one, it requires actually exhausting memory, so we skip it.
TEST(ScudoPrimaryTest, Primary64OOM) {
  using Primary = scudo::SizeClassAllocator64<SmallRegionsConfig>;
  using TransferBatch = Primary::CacheT::TransferBatch;
  Primary Allocator;
  Allocator.init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  scudo::GlobalStats Stats;
  Stats.init();
  Cache.init(&Stats, &Allocator);
  bool AllocationFailed = false;
  std::vector<TransferBatch *> Batches;
  const scudo::uptr ClassId = Primary::SizeClassMap::LargestClassId;
  const scudo::uptr Size = Primary::getSizeByClassId(ClassId);
  typename Primary::CacheT::CompactPtrT Blocks[TransferBatch::MaxNumCached];

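  // Keep popping batches of the largest size class until the (small) region is
  // exhausted; popBatch() returning null is the expected OOM signal.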
  for (scudo::uptr I = 0; I < 10000U; I++) {
    TransferBatch *B = Allocator.popBatch(&Cache, ClassId);
    if (!B) {
      AllocationFailed = true;
      break;
    }
    for (scudo::u16 J = 0; J < B->getCount(); J++)
      memset(Allocator.decompactPtr(ClassId, B->get(J)), 'B', Size);
    Batches.push_back(B);
  }
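  // Give everything back: copy each batch's compact pointers out, push the
  // blocks to the primary, then free the TransferBatch itself through the
  // BatchClassId size class.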
  while (!Batches.empty()) {
    TransferBatch *B = Batches.back();
    Batches.pop_back();
    B->copyToArray(Blocks);
    Allocator.pushBlocks(&Cache, ClassId, Blocks, B->getCount());
    Cache.deallocate(Primary::SizeClassMap::BatchClassId, B);
  }
  Cache.destroy(nullptr);
  Allocator.releaseToOS();
  scudo::ScopedString Str;
  Allocator.getStats(&Str);
  Str.output();
  EXPECT_EQ(AllocationFailed, true);
  Allocator.unmapTestOnly();
}

SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryIterate) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  std::vector<std::pair<scudo::uptr, void *>> V;
  for (scudo::uptr I = 0; I < 64U; I++) {
    const scudo::uptr Size = std::rand() % Primary::SizeClassMap::MaxSize;
    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
    void *P = Cache.allocate(ClassId);
    V.push_back(std::make_pair(ClassId, P));
  }
  scudo::uptr Found = 0;
  auto Lambda = [&V, &Found](scudo::uptr Block) {
    for (const auto &Pair : V) {
      if (Pair.second == reinterpret_cast<void *>(Block))
        Found++;
    }
  };
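  // Iterate over all blocks with the allocator disabled, and check that every
  // pointer allocated above is reported by the callback.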
  Allocator->disable();
  Allocator->iterateOverBlocks(Lambda);
  Allocator->enable();
  EXPECT_EQ(Found, V.size());
  while (!V.empty()) {
    auto Pair = V.back();
    Cache.deallocate(Pair.first, Pair.second);
    V.pop_back();
  }
  Cache.destroy(nullptr);
  Allocator->releaseToOS();
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}

SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryThreaded) {
  using Primary = TestAllocator<TypeParam, scudo::SvelteSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
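  // Use a condition variable as a start gate so that all the worker threads
  // begin allocating at roughly the same time.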
  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread([&]() {
      static thread_local typename Primary::CacheT Cache;
      Cache.init(nullptr, Allocator.get());
      std::vector<std::pair<scudo::uptr, void *>> V;
      {
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      for (scudo::uptr I = 0; I < 256U; I++) {
        const scudo::uptr Size =
            std::rand() % Primary::SizeClassMap::MaxSize / 4;
        const scudo::uptr ClassId =
            Primary::SizeClassMap::getClassIdBySize(Size);
        void *P = Cache.allocate(ClassId);
        if (P)
          V.push_back(std::make_pair(ClassId, P));
      }
      while (!V.empty()) {
        auto Pair = V.back();
        Cache.deallocate(Pair.first, Pair.second);
        V.pop_back();
      }
      Cache.destroy(nullptr);
    });
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS();
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}

// Through a simple allocation that spans two pages, verify that releaseToOS
// actually releases some bytes (at least one page worth). This is a regression
// test for an error in how the release criteria were computed.
SCUDO_TYPED_TEST(ScudoPrimaryTest, ReleaseToOS) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  const scudo::uptr Size = scudo::getPageSizeCached() * 2;
  EXPECT_TRUE(Primary::canAllocate(Size));
  const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
  void *P = Cache.allocate(ClassId);
  EXPECT_NE(P, nullptr);
  Cache.deallocate(ClassId, P);
  Cache.destroy(nullptr);
  EXPECT_GT(Allocator->releaseToOS(), 0U);
}

SCUDO_TYPED_TEST(ScudoPrimaryTest, MemoryGroup) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  const scudo::uptr Size = 32U;
  const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);

  // Allocate 4 times the group size worth of memory and release all of it. The
  // freed blocks are expected to be classified into groups. Then allocate one
  // group size worth of memory again and expect the maximum address difference
  // between the returned blocks to be at most 2 times the group size. The
  // blocks don't have to fall within a single group: the group id is obtained
  // by shifting the compact pointer, which, depending on the configuration,
  // may not be aligned to the group size, so the blocks can span at most two
  // groups.
  const scudo::uptr GroupSizeMem = (1ULL << Primary::GroupSizeLog);
  const scudo::uptr PeakAllocationMem = 4 * GroupSizeMem;
  const scudo::uptr PeakNumberOfAllocations = PeakAllocationMem / Size;
  const scudo::uptr FinalNumberOfAllocations = GroupSizeMem / Size;
  std::vector<scudo::uptr> Blocks;
  std::mt19937 R;

  for (scudo::uptr I = 0; I < PeakNumberOfAllocations; ++I)
    Blocks.push_back(reinterpret_cast<scudo::uptr>(Cache.allocate(ClassId)));

  std::shuffle(Blocks.begin(), Blocks.end(), R);

  // Release all the allocated blocks, including those in the local cache.
  while (!Blocks.empty()) {
    Cache.deallocate(ClassId, reinterpret_cast<void *>(Blocks.back()));
    Blocks.pop_back();
  }
  Cache.drain();

  for (scudo::uptr I = 0; I < FinalNumberOfAllocations; ++I)
    Blocks.push_back(reinterpret_cast<scudo::uptr>(Cache.allocate(ClassId)));

  EXPECT_LE(*std::max_element(Blocks.begin(), Blocks.end()) -
                *std::min_element(Blocks.begin(), Blocks.end()),
            GroupSizeMem * 2);
}