//===-- tsd_test.cpp --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "tsd_exclusive.h"
#include "tsd_shared.h"

#include <stdlib.h>

#include <condition_variable>
#include <mutex>
#include <set>
#include <thread>

// We mock out an allocator with a TSD registry, mostly using empty stubs. The
// cache contains a single volatile uptr, to be able to test that several
// concurrent threads will not access or modify the same cache at the same time.
template <class Config> class MockAllocator {
public:
  using ThisT = MockAllocator<Config>;
  using TSDRegistryT = typename Config::template TSDRegistryT<ThisT>;
  using CacheT = struct MockCache {
    volatile scudo::uptr Canary;
  };
  using QuarantineCacheT = struct MockQuarantine {};

  void init() {
    // This should only be called once by the registry.
    EXPECT_FALSE(Initialized);
    Initialized = true;
  }

  void unmapTestOnly() { TSDRegistry.unmapTestOnly(this); }
  void initCache(CacheT *Cache) { *Cache = {}; }
  void commitBack(scudo::TSD<MockAllocator> *TSD) {}
  TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
  void callPostInitCallback() {}

  bool isInitialized() { return Initialized; }

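  // The registry (and its TSDs) may be over-aligned, e.g. to a cache line, in
  // which case plain operator new is not guaranteed to return suitably
  // aligned storage; posix_memalign honors alignof(ThisT) explicitly.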
  void *operator new(size_t Size) {
    void *P = nullptr;
    EXPECT_EQ(0, posix_memalign(&P, alignof(ThisT), Size));
    return P;
  }
  void operator delete(void *P) { free(P); }

private:
  bool Initialized = false;
  TSDRegistryT TSDRegistry;
};

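// Test configurations. For the shared registry, the two numeric template
// parameters of TSDRegistrySharedT are (as exercised by the tests below) the
// maximum and the default number of TSDs; the exclusive registry instead
// gives each thread its own TSD.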
struct OneCache {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistrySharedT<Allocator, 1U, 1U>;
};

struct SharedCaches {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistrySharedT<Allocator, 16U, 8U>;
};

struct ExclusiveCaches {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistryExT<Allocator>;
};

TEST(ScudoTSDTest, TSDRegistryInit) {
  using AllocatorT = MockAllocator<OneCache>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  EXPECT_FALSE(Allocator->isInitialized());

  auto Registry = Allocator->getTSDRegistry();
  Registry->init(Allocator.get());
  EXPECT_TRUE(Allocator->isInitialized());
}

template <class AllocatorT> static void testRegistry() {
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  EXPECT_FALSE(Allocator->isInitialized());

  auto Registry = Allocator->getTSDRegistry();
  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/true);
  EXPECT_TRUE(Allocator->isInitialized());

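  // getTSDAndLock sets UnlockRequired depending on the registry: a shared TSD
  // is mutex-protected and must be unlocked after use, whereas an exclusive
  // per-thread TSD needs no unlock.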
  bool UnlockRequired;
  auto TSD = Registry->getTSDAndLock(&UnlockRequired);
  EXPECT_NE(TSD, nullptr);
  EXPECT_EQ(TSD->Cache.Canary, 0U);
  if (UnlockRequired)
    TSD->unlock();

  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/false);
  TSD = Registry->getTSDAndLock(&UnlockRequired);
  EXPECT_NE(TSD, nullptr);
  EXPECT_EQ(TSD->Cache.Canary, 0U);
  memset(&TSD->Cache, 0x42, sizeof(TSD->Cache));
  if (UnlockRequired)
    TSD->unlock();
}

TEST(ScudoTSDTest, TSDRegistryBasic) {
  testRegistry<MockAllocator<OneCache>>();
  testRegistry<MockAllocator<SharedCaches>>();
#if !SCUDO_FUCHSIA
  testRegistry<MockAllocator<ExclusiveCaches>>();
#endif
}

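// Start gate for the stress tests: worker threads block on Cv until the main
// thread flips Ready, so that they all hit the registry concurrently.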
static std::mutex Mutex;
static std::condition_variable Cv;
static bool Ready;

template <typename AllocatorT> static void stressCache(AllocatorT *Allocator) {
  auto Registry = Allocator->getTSDRegistry();
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    while (!Ready)
      Cv.wait(Lock);
  }
  Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
  bool UnlockRequired;
  auto TSD = Registry->getTSDAndLock(&UnlockRequired);
  EXPECT_NE(TSD, nullptr);
  // For an exclusive TSD, the cache should be empty. We cannot guarantee the
  // same for a shared TSD.
  if (!UnlockRequired)
    EXPECT_EQ(TSD->Cache.Canary, 0U);
  // Transform the thread id to a uptr to use it as canary.
  const scudo::uptr Canary = static_cast<scudo::uptr>(
      std::hash<std::thread::id>{}(std::this_thread::get_id()));
  TSD->Cache.Canary = Canary;
  // Loop a few times to make sure that a concurrent thread isn't modifying it.
  for (scudo::uptr I = 0; I < 4096U; I++)
    EXPECT_EQ(TSD->Cache.Canary, Canary);
  if (UnlockRequired)
    TSD->unlock();
}

template <class AllocatorT> static void testRegistryThreaded() {
  Ready = false;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressCache<AllocatorT>, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
}

TEST(ScudoTSDTest, TSDRegistryThreaded) {
  testRegistryThreaded<MockAllocator<OneCache>>();
  testRegistryThreaded<MockAllocator<SharedCaches>>();
#if !SCUDO_FUCHSIA
  testRegistryThreaded<MockAllocator<ExclusiveCaches>>();
#endif
}

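// Every distinct TSD pointer observed across the worker threads; insertions
// are serialized under Mutex.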
static std::set<void *> Pointers;

static void stressSharedRegistry(MockAllocator<SharedCaches> *Allocator) {
  std::set<void *> Set;
  auto Registry = Allocator->getTSDRegistry();
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    while (!Ready)
      Cv.wait(Lock);
  }
  Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
  bool UnlockRequired;
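  // Repeatedly grab and release a TSD; under contention the shared registry
  // can hand back different TSDs over time, so record every distinct pointer
  // this thread ever sees.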
  for (scudo::uptr I = 0; I < 4096U; I++) {
    auto TSD = Registry->getTSDAndLock(&UnlockRequired);
    EXPECT_NE(TSD, nullptr);
    Set.insert(reinterpret_cast<void *>(TSD));
    if (UnlockRequired)
      TSD->unlock();
  }
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Pointers.insert(Set.begin(), Set.end());
  }
}

TEST(ScudoTSDTest, TSDRegistryTSDsCount) {
  Ready = false;
  Pointers.clear();
  using AllocatorT = MockAllocator<SharedCaches>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  // We attempt to use as many TSDs as the shared cache offers by creating a
  // decent amount of threads that will be run concurrently and attempt to get
  // and lock TSDs. We put them all in a set and count the number of entries
  // after we are done.
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressSharedRegistry, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  // The initial number of TSDs we get will be the minimum of the default count
  // and the number of CPUs.
  EXPECT_LE(Pointers.size(), 8U);
  Pointers.clear();
  auto Registry = Allocator->getTSDRegistry();
  // Increase the number of TSDs to 16.
  Registry->setOption(scudo::Option::MaxTSDsCount, 16);
  Ready = false;
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressSharedRegistry, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  // We should get 16 distinct TSDs back.
  EXPECT_EQ(Pointers.size(), 16U);
}