//===-- wrappers_c_test.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include <errno.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

extern "C" {
void malloc_enable(void);
void malloc_disable(void);
int malloc_iterate(uintptr_t base, size_t size,
                   void (*callback)(uintptr_t base, size_t size, void *arg),
                   void *arg);
void *valloc(size_t size);
void *pvalloc(size_t size);
}

// Note that every C allocation function in the test binary will be fulfilled
// by Scudo (this includes the gtest APIs, etc.), which is a test in itself.
// But this might also lead to unexpected side effects, since the allocation
// and deallocation operations in the TEST functions will coexist with others
// (see the EXPECT_DEATH comment below).

// We have to use a small quarantine to make sure that our double-free tests
// trigger. Otherwise EXPECT_DEATH ends up reallocating the chunk that was just
// freed (this obviously depends on the size) and the following free succeeds.

static const size_t Size = 100U;

TEST(ScudoWrappersCTest, Malloc) {
  void *P = malloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % FIRST_32_SECOND_64(8U, 16U), 0U);
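  // Freeing a pointer that malloc never returned (P with its low bit set) and
  // freeing the same chunk twice are both expected to be caught by Scudo and
  // abort the process.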
  EXPECT_DEATH(
      free(reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(P) | 1U)), "");
  free(P);
  EXPECT_DEATH(free(P), "");

  P = malloc(0U);
  EXPECT_NE(P, nullptr);
  free(P);

  errno = 0;
  EXPECT_EQ(malloc(SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
}

TEST(ScudoWrappersCTest, Calloc) {
  void *P = calloc(1U, Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  for (size_t I = 0; I < Size; I++)
    EXPECT_EQ((reinterpret_cast<uint8_t *>(P))[I], 0U);
  free(P);

  P = calloc(1U, 0U);
  EXPECT_NE(P, nullptr);
  free(P);
  P = calloc(0U, 1U);
  EXPECT_NE(P, nullptr);
  free(P);

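  // Oversized requests, including ones where nmemb * size overflows size_t,
  // must fail rather than return an undersized allocation.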
  errno = 0;
  EXPECT_EQ(calloc(SIZE_MAX, 1U), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  errno = 0;
  EXPECT_EQ(calloc(static_cast<size_t>(LONG_MAX) + 1U, 2U), nullptr);
  if (SCUDO_ANDROID)
    EXPECT_EQ(errno, ENOMEM);
  errno = 0;
  EXPECT_EQ(calloc(SIZE_MAX, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
}

TEST(ScudoWrappersCTest, Memalign) {
  void *P;
  for (size_t I = FIRST_32_SECOND_64(2U, 3U); I <= 18U; I++) {
    const size_t Alignment = 1U << I;

    P = memalign(Alignment, Size);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    free(P);

    P = nullptr;
    EXPECT_EQ(posix_memalign(&P, Alignment, Size), 0);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    free(P);
  }

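  // Error cases: a request too large to be serviced, and a posix_memalign
  // alignment that is not a power of two, which must fail with EINVAL.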
  EXPECT_EQ(memalign(4096U, SIZE_MAX), nullptr);
  EXPECT_EQ(posix_memalign(&P, 15U, Size), EINVAL);
  EXPECT_EQ(posix_memalign(&P, 4096U, SIZE_MAX), ENOMEM);

  // Android's memalign accepts non power-of-2 alignments, and 0.
  if (SCUDO_ANDROID) {
    for (size_t Alignment = 0U; Alignment <= 128U; Alignment++) {
      P = memalign(Alignment, 1024U);
      EXPECT_NE(P, nullptr);
      free(P);
    }
  }
}

TEST(ScudoWrappersCTest, AlignedAlloc) {
  const size_t Alignment = 4096U;
  void *P = aligned_alloc(Alignment, Alignment * 4U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Alignment * 4U, malloc_usable_size(P));
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
  free(P);

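  // aligned_alloc requires the size to be a multiple of the alignment; Size is
  // not, so the call must fail and set errno to EINVAL.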
  errno = 0;
  P = aligned_alloc(Alignment, Size);
  EXPECT_EQ(P, nullptr);
  EXPECT_EQ(errno, EINVAL);
}

TEST(ScudoWrappersCTest, Realloc) {
  // realloc(nullptr, N) is malloc(N)
  void *P = realloc(nullptr, 0U);
  EXPECT_NE(P, nullptr);
  free(P);

  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  // realloc(P, 0U) is free(P) and returns nullptr
  EXPECT_EQ(realloc(P, 0U), nullptr);

  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  memset(P, 0x42, Size);

  P = realloc(P, Size * 2U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size * 2U, malloc_usable_size(P));
  for (size_t I = 0; I < Size; I++)
    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);

  P = realloc(P, Size / 2U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size / 2U, malloc_usable_size(P));
  for (size_t I = 0; I < Size / 2U; I++)
    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
  free(P);

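  // P was freed just above; calling realloc on a freed pointer is a
  // use-after-free that Scudo is expected to catch.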
  EXPECT_DEATH(P = realloc(P, Size), "");

  errno = 0;
  EXPECT_EQ(realloc(nullptr, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  errno = 0;
  EXPECT_EQ(realloc(P, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  free(P);

  // Android allows realloc of memalign pointers.
  if (SCUDO_ANDROID) {
    const size_t Alignment = 1024U;
    P = memalign(Alignment, Size);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    memset(P, 0x42, Size);

    P = realloc(P, Size * 2U);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size * 2U, malloc_usable_size(P));
    for (size_t I = 0; I < Size; I++)
      EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
    free(P);
  }
}

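// M_DECAY_TIME and M_PURGE are Bionic mallopt() extensions; provide fallback
// values so that the test also builds against a libc that does not define
// them.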
#ifndef M_DECAY_TIME
#define M_DECAY_TIME -100
#endif

#ifndef M_PURGE
#define M_PURGE -101
#endif

#if !SCUDO_FUCHSIA
TEST(ScudoWrappersCTest, MallOpt) {
  errno = 0;
  EXPECT_EQ(mallopt(-1000, 1), 0);
  // mallopt doesn't set errno.
  EXPECT_EQ(errno, 0);

  EXPECT_EQ(mallopt(M_PURGE, 0), 1);

  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);
}
#endif

TEST(ScudoWrappersCTest, OtherAlloc) {
#if !SCUDO_FUCHSIA
  const size_t PageSize = sysconf(_SC_PAGESIZE);

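  // pvalloc rounds the requested size up to a multiple of the page size and
  // returns a page-aligned pointer, so even a small request has at least a
  // page of usable size.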
  void *P = pvalloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
  EXPECT_LE(PageSize, malloc_usable_size(P));
  free(P);

  EXPECT_EQ(pvalloc(SIZE_MAX), nullptr);

  P = pvalloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
  free(P);
#endif

  EXPECT_EQ(valloc(SIZE_MAX), nullptr);
}

#if !SCUDO_FUCHSIA
TEST(ScudoWrappersCTest, MallInfo) {
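  // The allocation is large enough to bypass the test quarantine, so free()
  // should immediately move its bytes from uordblks (in-use) to fordblks
  // (free); hblkhd tracks memory mapped by the allocator.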
  const size_t BypassQuarantineSize = 1024U;

  struct mallinfo MI = mallinfo();
  size_t Allocated = MI.uordblks;
  void *P = malloc(BypassQuarantineSize);
  EXPECT_NE(P, nullptr);
  MI = mallinfo();
  EXPECT_GE(static_cast<size_t>(MI.uordblks), Allocated + BypassQuarantineSize);
  EXPECT_GT(static_cast<size_t>(MI.hblkhd), 0U);
  size_t Free = MI.fordblks;
  free(P);
  MI = mallinfo();
  EXPECT_GE(static_cast<size_t>(MI.fordblks), Free + BypassQuarantineSize);
}
#endif

static uintptr_t BoundaryP;
static size_t Count;

static void callback(uintptr_t Base, size_t Size, void *Arg) {
  if (Base == BoundaryP)
    Count++;
}

// Verify that a block located on an iteration boundary is not mis-accounted.
// To achieve this, we allocate a chunk whose backing block is aligned on a
// page boundary, then run malloc_iterate on each of the two pages that meet at
// that boundary. The callback must see the block exactly once.
TEST(ScudoWrappersCTest, MallocIterateBoundary) {
  const size_t PageSize = sysconf(_SC_PAGESIZE);
  const size_t BlockDelta = FIRST_32_SECOND_64(8U, 16U);
  const size_t SpecialSize = PageSize - BlockDelta;

  // We aren't guaranteed that any size class is exactly a page wide, so we
  // need to keep making allocations until we succeed.
  //
  // With a 16-byte block alignment and 4096-byte page size, each allocation
  // has a probability of (1 - (16/4096)) of failing to meet the alignment
  // requirements, and the probability of failing 65536 times is
  // (1 - (16/4096))^65536 < 10^-111. So if we still haven't succeeded after
  // 65536 tries, give up.
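  // Chain the allocations through their first bytes so that unsuitable chunks
  // stay live (and cannot be handed back to us again); the whole chain is
  // freed at the end of the test.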
  uintptr_t Block;
  void *P = nullptr;
  for (unsigned I = 0; I != 65536; ++I) {
    void *PrevP = P;
    P = malloc(SpecialSize);
    EXPECT_NE(P, nullptr);
    *reinterpret_cast<void **>(P) = PrevP;
    BoundaryP = reinterpret_cast<uintptr_t>(P);
    Block = BoundaryP - BlockDelta;
    if ((Block & (PageSize - 1)) == 0U)
      break;
  }
  EXPECT_EQ((Block & (PageSize - 1)), 0U);

  Count = 0U;
  malloc_disable();
  malloc_iterate(Block - PageSize, PageSize, callback, nullptr);
  malloc_iterate(Block, PageSize, callback, nullptr);
  malloc_enable();
  EXPECT_EQ(Count, 1U);

  while (P) {
    void *NextP = *reinterpret_cast<void **>(P);
    free(P);
    P = NextP;
  }
}

// We expect heap operations within a disable/enable scope to deadlock.
TEST(ScudoWrappersCTest, MallocDisableDeadlock) {
  EXPECT_DEATH(
      {
        void *P = malloc(Size);
        EXPECT_NE(P, nullptr);
        free(P);
        malloc_disable();
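        // The malloc below should deadlock on the disabled allocator; the
        // alarm then kills the process via SIGALRM after one second, which
        // satisfies EXPECT_DEATH.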
        alarm(1);
        P = malloc(Size);
        malloc_enable();
      },
      "");
}

// Fuchsia doesn't have fork or malloc_info.
#if !SCUDO_FUCHSIA

TEST(ScudoWrappersCTest, MallocInfo) {
  // Use volatile so that the allocations don't get optimized away.
  void *volatile P1 = malloc(1234);
  void *volatile P2 = malloc(4321);

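  // Capture malloc_info's XML output in an in-memory stream and check that it
  // reports the two live allocations above.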
  char Buffer[16384];
  FILE *F = fmemopen(Buffer, sizeof(Buffer), "w+");
  EXPECT_NE(F, nullptr);
  errno = 0;
  EXPECT_EQ(malloc_info(0, F), 0);
  EXPECT_EQ(errno, 0);
  fclose(F);
  EXPECT_EQ(strncmp(Buffer, "<malloc version=\"scudo-", 23), 0);
  EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"1234\" count=\""));
  EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"4321\" count=\""));

  free(P1);
  free(P2);
}

TEST(ScudoWrappersCTest, Fork) {
  void *P;
  pid_t Pid = fork();
  EXPECT_GE(Pid, 0);
  if (Pid == 0) {
    P = malloc(Size);
    EXPECT_NE(P, nullptr);
    memset(P, 0x42, Size);
    free(P);
    _exit(0);
  }
  waitpid(Pid, nullptr, 0);
  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  memset(P, 0x42, Size);
  free(P);

  // fork should stall if the allocator has been disabled.
  EXPECT_DEATH(
      {
        malloc_disable();
        alarm(1);
        Pid = fork();
        EXPECT_GE(Pid, 0);
      },
      "");
}

static pthread_mutex_t Mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t Conditional = PTHREAD_COND_INITIALIZER;
static bool Ready;

static void *enableMalloc(void *Unused) {
  // Initialize the allocator for this thread.
  void *P = malloc(Size);
  EXPECT_NE(P, nullptr);
  memset(P, 0x42, Size);
  free(P);

  // Signal the main thread we are ready.
  pthread_mutex_lock(&Mutex);
  Ready = true;
  pthread_cond_signal(&Conditional);
  pthread_mutex_unlock(&Mutex);

  // Wait for the malloc_disable & fork, then enable the allocator again.
  sleep(1);
  malloc_enable();

  return nullptr;
}

TEST(ScudoWrappersCTest, DisableForkEnable) {
  pthread_t ThreadId;
  EXPECT_EQ(pthread_create(&ThreadId, nullptr, &enableMalloc, nullptr), 0);

  // Wait for the thread to be warmed up.
  pthread_mutex_lock(&Mutex);
  while (!Ready)
    pthread_cond_wait(&Conditional, &Mutex);
  pthread_mutex_unlock(&Mutex);

  // Disable the allocator and fork. fork should succeed after malloc_enable.
  malloc_disable();
  pid_t Pid = fork();
  EXPECT_GE(Pid, 0);
  if (Pid == 0) {
    void *P = malloc(Size);
    EXPECT_NE(P, nullptr);
    memset(P, 0x42, Size);
    free(P);
    _exit(0);
  }
  waitpid(Pid, nullptr, 0);
  EXPECT_EQ(pthread_join(ThreadId, 0), 0);
}

#endif // SCUDO_FUCHSIA
423