//===-- combined_test.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "combined.h"

#include <algorithm>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <vector>

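// Shared state used to line up the stress-test threads: each thread in
// stressAllocator() blocks on Cv until the main thread flips Ready, so that
// all of them start hammering the allocator at roughly the same time.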
static std::mutex Mutex;
static std::condition_variable Cv;
static bool Ready = false;

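// All test allocations use the Malloc origin; the same origin has to be
// passed back on deallocation, as Scudo can check for alloc/dealloc type
// mismatches.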
static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;

template <class Config> static void testAllocator() {
  using AllocatorT = scudo::Allocator<Config>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  Allocator->reset();

  EXPECT_FALSE(Allocator->isOwned(&Mutex));
  EXPECT_FALSE(Allocator->isOwned(&Allocator));
  scudo::u64 StackVariable = 0x42424242U;
  EXPECT_FALSE(Allocator->isOwned(&StackVariable));
  EXPECT_EQ(StackVariable, 0x42424242U);

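  // Minimum guaranteed alignment: 2^3 = 8 bytes on 32-bit platforms,
  // 2^4 = 16 bytes on 64-bit ones.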
  constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

  // This allocates and deallocates a bunch of chunks, with a wide range of
  // sizes and alignments, with a focus on sizes that could trigger weird
  // behaviors (a power of two plus or minus a small delta, for example).
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
      const scudo::uptr Align = 1U << AlignLog;
      for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
        if (static_cast<scudo::sptr>(1U << SizeLog) + Delta <= 0)
          continue;
        const scudo::uptr Size = (1U << SizeLog) + Delta;
        void *P = Allocator->allocate(Size, Origin, Align);
        EXPECT_NE(P, nullptr);
        EXPECT_TRUE(Allocator->isOwned(P));
        EXPECT_TRUE(scudo::isAligned(reinterpret_cast<scudo::uptr>(P), Align));
        EXPECT_LE(Size, Allocator->getUsableSize(P));
        memset(P, 0xaa, Size);
        Allocator->deallocate(P, Origin, Size);
      }
    }
  }
  Allocator->releaseToOS();

  // Ensure that specifying ZeroContents returns a zeroed-out block.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        EXPECT_EQ((reinterpret_cast<char *>(P))[I], 0);
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
  Allocator->releaseToOS();

  // Verify that a chunk will end up being reused, at some point.
  const scudo::uptr NeedleSize = 1024U;
  void *NeedleP = Allocator->allocate(NeedleSize, Origin);
  Allocator->deallocate(NeedleP, Origin);
  bool Found = false;
  for (scudo::uptr I = 0; I < 1024U && !Found; I++) {
    void *P = Allocator->allocate(NeedleSize, Origin);
    if (P == NeedleP)
      Found = true;
    Allocator->deallocate(P, Origin);
  }
  EXPECT_TRUE(Found);

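  // MaxSize is the largest chunk size served by the Primary; the MaxSize * 2
  // allocation below is therefore fulfilled by the Secondary (MapAllocator).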
  constexpr scudo::uptr MaxSize = Config::Primary::SizeClassMap::MaxSize;

  // Reallocate a large chunk all the way down to a byte, verifying that we
  // preserve the data in the process.
  scudo::uptr Size = MaxSize * 2;
  const scudo::uptr DataSize = 2048U;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 0xab;
  memset(P, Marker, scudo::Min(Size, DataSize));
  while (Size > 1U) {
    Size /= 2U;
    void *NewP = Allocator->reallocate(P, Size);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < scudo::Min(Size, DataSize); J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    P = NewP;
  }
  Allocator->deallocate(P, Origin);

  // Check that reallocating a chunk to a slightly smaller or larger size
  // returns the same chunk. This requires that all the sizes we iterate on use
  // the same block size, but that should be the case for 2048 with our default
  // size class maps.
  P = Allocator->allocate(DataSize, Origin);
  memset(P, Marker, DataSize);
  for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
    const scudo::uptr NewSize = DataSize + Delta;
    void *NewP = Allocator->reallocate(P, NewSize);
    EXPECT_EQ(NewP, P);
    for (scudo::uptr I = 0; I < DataSize - 32; I++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[I], Marker);
  }
  Allocator->deallocate(P, Origin);

  // Allocate a bunch of chunks, then iterate over all of them, ensuring they
  // are the ones we allocated. This requires the allocator to not have any
  // other allocated chunk at this point (e.g., it won't work with the
  // Quarantine).
  if (!UseQuarantine) {
    std::vector<void *> V;
    for (scudo::uptr I = 0; I < 64U; I++)
      V.push_back(Allocator->allocate(rand() % (MaxSize / 2U), Origin));
    Allocator->disable();
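    // Chunk iteration requires the allocator to be disabled (locked) first,
    // so that its internal structures stay in a consistent state while the
    // callback walks them.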
    Allocator->iterateOverChunks(
        0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
        [](uintptr_t Base, size_t Size, void *Arg) {
          std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
          void *P = reinterpret_cast<void *>(Base);
          EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
        },
        reinterpret_cast<void *>(&V));
    Allocator->enable();
    while (!V.empty()) {
      Allocator->deallocate(V.back(), Origin);
      V.pop_back();
    }
  }

  Allocator->releaseToOS();

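  // getStats() returns the number of bytes needed for the full report; grow
  // the buffer and retry until the whole report fits.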
  scudo::uptr BufferSize = 8192;
  std::vector<char> Buffer(BufferSize);
  scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  while (ActualSize > BufferSize) {
    BufferSize = ActualSize + 1024;
    Buffer.resize(BufferSize);
    ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  }
  std::string Stats(Buffer.begin(), Buffer.end());
  // Basic checks on the contents of the statistics output, which also allow us
  // to verify that we got it all.
  EXPECT_NE(Stats.find("Stats: SizeClassAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: MapAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
}

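// Exercise each of the platform configurations; the Quarantine is only
// enabled for the AndroidConfig run.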
TEST(ScudoCombinedTest, BasicCombined) {
  UseQuarantine = false;
  testAllocator<scudo::AndroidSvelteConfig>();
#if SCUDO_FUCHSIA
  testAllocator<scudo::FuchsiaConfig>();
#else
  testAllocator<scudo::DefaultConfig>();
  UseQuarantine = true;
  testAllocator<scudo::AndroidConfig>();
#endif
}

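// Body of the stress-test threads: block until the main thread signals Ready,
// then allocate a batch of randomly sized chunks and free them all.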
template <typename AllocatorT> static void stressAllocator(AllocatorT *A) {
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    while (!Ready)
      Cv.wait(Lock);
  }
  std::vector<std::pair<void *, scudo::uptr>> V;
  for (scudo::uptr I = 0; I < 256U; I++) {
    const scudo::uptr Size = std::rand() % 4096U;
    void *P = A->allocate(Size, Origin);
    // A region could have run out of memory, resulting in a null P.
    if (P)
      V.push_back(std::make_pair(P, Size));
  }
  while (!V.empty()) {
    auto Pair = V.back();
    A->deallocate(Pair.first, Origin, Pair.second);
    V.pop_back();
  }
}

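// Run 32 concurrent instances of stressAllocator against a single allocator,
// using the Ready/Cv rendezvous to release them all at roughly the same time.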
template <class Config> static void testAllocatorThreaded() {
  // Reset the rendezvous flag so that this run's threads actually block until
  // the signal below; a previous run would otherwise leave Ready set.
  Ready = false;
  using AllocatorT = scudo::Allocator<Config>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  Allocator->reset();
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressAllocator<AllocatorT>, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS();
}

TEST(ScudoCombinedTest, ThreadedCombined) {
  UseQuarantine = false;
  testAllocatorThreaded<scudo::AndroidSvelteConfig>();
#if SCUDO_FUCHSIA
  testAllocatorThreaded<scudo::FuchsiaConfig>();
#else
  testAllocatorThreaded<scudo::DefaultConfig>();
  UseQuarantine = true;
  testAllocatorThreaded<scudo::AndroidConfig>();
#endif
}

struct DeathConfig {
  // Tiny allocator: its Primary only serves chunks of 1024 bytes.
  using DeathSizeClassMap = scudo::SizeClassMap<1U, 10U, 10U, 10U, 1U, 10U>;
  typedef scudo::SizeClassAllocator64<DeathSizeClassMap, 20U> Primary;
  typedef scudo::MapAllocator<0U> Secondary;
  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U>;
};

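// Check that typical misuses of the API abort the process: deallocating with
// a mismatched size, operating on a misaligned pointer, corrupting the chunk
// header, and using a chunk after it has been deallocated.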
TEST(ScudoCombinedTest, DeathCombined) {
  using AllocatorT = scudo::Allocator<DeathConfig>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  Allocator->reset();

  const scudo::uptr Size = 1000U;
  void *P = Allocator->allocate(Size, Origin);
  EXPECT_NE(P, nullptr);

  // Invalid-sized deallocation.
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size + 8U), "");

  // Misaligned pointer. Potentially unused if EXPECT_DEATH isn't available.
  UNUSED void *MisalignedP =
      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) | 1U);
  EXPECT_DEATH(Allocator->deallocate(MisalignedP, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(MisalignedP, Size * 2U), "");

  // Header corruption.
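  // Flipping bits in the checksummed header must be detected; note that the
  // three XORs below cancel out overall (0x42 ^ 0x420042 ^ 0x420000 == 0), so
  // the header ends up valid again.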
  scudo::u64 *H =
      reinterpret_cast<scudo::u64 *>(scudo::Chunk::getAtomicHeader(P));
  *H ^= 0x42U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420042U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420000U;

  // Invalid chunk state.
  Allocator->deallocate(P, Origin, Size);
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(P, Size * 2U), "");
  EXPECT_DEATH(Allocator->getUsableSize(P), "");
}

// Ensure that releaseToOS can be called prior to any other allocator
// operation without issue.
TEST(ScudoCombinedTest, ReleaseToOS) {
  using AllocatorT = scudo::Allocator<DeathConfig>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  Allocator->reset();

  Allocator->releaseToOS();
}
295