//===-- wrappers_c_test.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include <errno.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

extern "C" {
void malloc_enable(void);
void malloc_disable(void);
int malloc_iterate(uintptr_t base, size_t size,
                   void (*callback)(uintptr_t base, size_t size, void *arg),
                   void *arg);
void *valloc(size_t size);
void *pvalloc(size_t size);
}

// Note that every C allocation function in the test binary will be fulfilled
// by Scudo (this includes the gtest APIs, etc.), which is a test in itself.
// But this might also lead to unexpected side-effects, since the allocation
// and deallocation operations in the TEST functions will coexist with others
// (see the EXPECT_DEATH comment below).

// We have to use a small quarantine to make sure that our double-free tests
// trigger. Otherwise, EXPECT_DEATH ends up reallocating the chunk that was
// just freed (depending on its size), and the subsequent free succeeds.

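// A quarantine small enough for these tests to trigger can be requested
// through Scudo's __scudo_default_options hook, along the lines of the sketch
// below. The values are illustrative assumptions; the actual test harness may
// configure them elsewhere.
//
//   extern "C" const char *__scudo_default_options() {
//     return "quarantine_size_kb=256:thread_local_quarantine_size_kb=128:"
//            "quarantine_max_chunk_size=512";
//   }
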
static const size_t Size = 100U;

TEST(ScudoWrappersCTest, Malloc) {
  void *P = malloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % FIRST_32_SECOND_64(8U, 16U), 0U);
  EXPECT_DEATH(
      free(reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(P) | 1U)), "");
  free(P);
  EXPECT_DEATH(free(P), "");

  P = malloc(0U);
  EXPECT_NE(P, nullptr);
  free(P);

  errno = 0;
  EXPECT_EQ(malloc(SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
}

TEST(ScudoWrappersCTest, Calloc) {
  void *P = calloc(1U, Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  for (size_t I = 0; I < Size; I++)
    EXPECT_EQ((reinterpret_cast<uint8_t *>(P))[I], 0U);
  free(P);

  P = calloc(1U, 0U);
  EXPECT_NE(P, nullptr);
  free(P);
  P = calloc(0U, 1U);
  EXPECT_NE(P, nullptr);
  free(P);

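  // Overflowing or overly large products of n * size must make calloc fail
  // cleanly rather than truncate the requested byte count; Scudo reports
  // ENOMEM.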
  errno = 0;
  EXPECT_EQ(calloc(SIZE_MAX, 1U), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  errno = 0;
  EXPECT_EQ(calloc(static_cast<size_t>(LONG_MAX) + 1U, 2U), nullptr);
  if (SCUDO_ANDROID)
    EXPECT_EQ(errno, ENOMEM);
  errno = 0;
  EXPECT_EQ(calloc(SIZE_MAX, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
}

TEST(ScudoWrappersCTest, Memalign) {
  void *P;
  for (size_t I = FIRST_32_SECOND_64(2U, 3U); I <= 18U; I++) {
    const size_t Alignment = 1U << I;

    P = memalign(Alignment, Size);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    free(P);

    P = nullptr;
    EXPECT_EQ(posix_memalign(&P, Alignment, Size), 0);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    free(P);
  }

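  // posix_memalign requires the alignment to be a power of two and a multiple
  // of sizeof(void *), hence 15 is rejected with EINVAL; oversized requests
  // fail with ENOMEM.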
  EXPECT_EQ(memalign(4096U, SIZE_MAX), nullptr);
  EXPECT_EQ(posix_memalign(&P, 15U, Size), EINVAL);
  EXPECT_EQ(posix_memalign(&P, 4096U, SIZE_MAX), ENOMEM);

  // Android's memalign accepts non-power-of-two alignments, as well as 0.
  if (SCUDO_ANDROID) {
    for (size_t Alignment = 0U; Alignment <= 128U; Alignment++) {
      P = memalign(Alignment, 1024U);
      EXPECT_NE(P, nullptr);
      free(P);
    }
  }
}

TEST(ScudoWrappersCTest, AlignedAlloc) {
  const size_t Alignment = 4096U;
  void *P = aligned_alloc(Alignment, Alignment * 4U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Alignment * 4U, malloc_usable_size(P));
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
  free(P);

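  // aligned_alloc requires the size to be a multiple of the alignment; Size
  // (100) is not a multiple of 4096, so the call must fail with EINVAL.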
  errno = 0;
  P = aligned_alloc(Alignment, Size);
  EXPECT_EQ(P, nullptr);
  EXPECT_EQ(errno, EINVAL);
}

TEST(ScudoWrappersCTest, Realloc) {
  // realloc(nullptr, N) behaves like malloc(N).
  void *P = realloc(nullptr, 0U);
  EXPECT_NE(P, nullptr);
  free(P);

  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  // realloc(P, 0U) behaves like free(P) and returns nullptr.
  EXPECT_EQ(realloc(P, 0U), nullptr);

  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  memset(P, 0x42, Size);

  P = realloc(P, Size * 2U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size * 2U, malloc_usable_size(P));
  for (size_t I = 0; I < Size; I++)
    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);

  P = realloc(P, Size / 2U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size / 2U, malloc_usable_size(P));
  for (size_t I = 0; I < Size / 2U; I++)
    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
  free(P);

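  // P was freed above: reallocating it is a use-after-free that the allocator
  // is expected to detect and abort on.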
  EXPECT_DEATH(P = realloc(P, Size), "");

  errno = 0;
  EXPECT_EQ(realloc(nullptr, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  errno = 0;
  EXPECT_EQ(realloc(P, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  free(P);

  // Android allows realloc of memalign pointers.
  if (SCUDO_ANDROID) {
    const size_t Alignment = 1024U;
    P = memalign(Alignment, Size);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    memset(P, 0x42, Size);

    P = realloc(P, Size * 2U);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size * 2U, malloc_usable_size(P));
    for (size_t I = 0; I < Size; I++)
      EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
    free(P);
  }
}

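// M_DECAY_TIME and M_PURGE are Bionic (Android) extensions to mallopt(3);
// define fallback values when building against a libc that lacks them.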
#ifndef M_DECAY_TIME
#define M_DECAY_TIME -100
#endif

#ifndef M_PURGE
#define M_PURGE -101
#endif

#if !SCUDO_FUCHSIA
TEST(ScudoWrappersCTest, MallOpt) {
  errno = 0;
  EXPECT_EQ(mallopt(-1000, 1), 0);
  // mallopt doesn't set errno.
  EXPECT_EQ(errno, 0);

  EXPECT_EQ(mallopt(M_PURGE, 0), 1);

  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);
}
#endif

TEST(ScudoWrappersCTest, OtherAlloc) {
#if !SCUDO_FUCHSIA
  const size_t PageSize = sysconf(_SC_PAGESIZE);

  void *P = pvalloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
  EXPECT_LE(PageSize, malloc_usable_size(P));
  free(P);

  EXPECT_EQ(pvalloc(SIZE_MAX), nullptr);

  P = pvalloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
  free(P);
#endif

  EXPECT_EQ(valloc(SIZE_MAX), nullptr);
}

#if !SCUDO_FUCHSIA
TEST(ScudoWrappersCTest, MallInfo) {
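  // 1024 bytes is assumed to be larger than the quarantine's maximum chunk
  // size, so the chunk bypasses the quarantine and its deallocation shows up
  // in mallinfo() immediately.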
  const size_t BypassQuarantineSize = 1024U;

  struct mallinfo MI = mallinfo();
  size_t Allocated = MI.uordblks;
  void *P = malloc(BypassQuarantineSize);
  EXPECT_NE(P, nullptr);
  MI = mallinfo();
  EXPECT_GE(static_cast<size_t>(MI.uordblks), Allocated + BypassQuarantineSize);
  EXPECT_GT(static_cast<size_t>(MI.hblkhd), 0U);
  size_t Free = MI.fordblks;
  free(P);
  MI = mallinfo();
  EXPECT_GE(static_cast<size_t>(MI.fordblks), Free + BypassQuarantineSize);
}
#endif

static uintptr_t BoundaryP;
static size_t Count;

static void callback(uintptr_t Base, size_t Size, void *Arg) {
  if (Base == BoundaryP)
    Count++;
}

// Verify that a block located on an iteration boundary is not mis-accounted.
// To achieve this, we allocate a chunk whose backing block will be aligned on
// a page boundary, then run malloc_iterate over both of the pages that share
// that boundary. The callback must see the chunk exactly once.
TEST(ScudoWrappersCTest, MallocIterateBoundary) {
  const size_t PageSize = sysconf(_SC_PAGESIZE);
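  // BlockDelta is assumed to match the size of the allocator's in-band chunk
  // header, so that the user pointer sits BlockDelta bytes past the
  // page-aligned start of the backing block.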
  const size_t BlockDelta = FIRST_32_SECOND_64(8U, 16U);
  const size_t SpecialSize = PageSize - BlockDelta;

  void *P = malloc(SpecialSize);
  EXPECT_NE(P, nullptr);
  BoundaryP = reinterpret_cast<uintptr_t>(P);
  const uintptr_t Block = BoundaryP - BlockDelta;
  EXPECT_EQ((Block & (PageSize - 1)), 0U);

  Count = 0U;
  malloc_disable();
  malloc_iterate(Block - PageSize, PageSize, callback, nullptr);
  malloc_iterate(Block, PageSize, callback, nullptr);
  malloc_enable();
  EXPECT_EQ(Count, 1U);

  free(P);
}

// We expect heap operations within a disable/enable scope to deadlock.
TEST(ScudoWrappersCTest, MallocDisableDeadlock) {
  EXPECT_DEATH(
      {
        void *P = malloc(Size);
        EXPECT_NE(P, nullptr);
        free(P);
        malloc_disable();
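        // The alarm schedules a SIGALRM whose default action kills the
        // process: the malloc below blocks on the disabled allocator, and the
        // signal-induced death is what EXPECT_DEATH observes.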
        alarm(1);
        P = malloc(Size);
        malloc_enable();
      },
      "");
}

// Fuchsia doesn't have fork or malloc_info.
#if !SCUDO_FUCHSIA

TEST(ScudoWrappersCTest, MallocInfo) {
  char Buffer[64];
  FILE *F = fmemopen(Buffer, sizeof(Buffer), "w+");
  EXPECT_NE(F, nullptr);
  errno = 0;
  EXPECT_EQ(malloc_info(0, F), 0);
  EXPECT_EQ(errno, 0);
  fclose(F);
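  // 23 is the length of the expected prefix literal below.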
  EXPECT_EQ(strncmp(Buffer, "<malloc version=\"scudo-", 23), 0);
}

TEST(ScudoWrappersCTest, Fork) {
  void *P;
  pid_t Pid = fork();
  EXPECT_GE(Pid, 0);
  if (Pid == 0) {
    P = malloc(Size);
    EXPECT_NE(P, nullptr);
    memset(P, 0x42, Size);
    free(P);
    _exit(0);
  }
  waitpid(Pid, nullptr, 0);
  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  memset(P, 0x42, Size);
  free(P);

  // fork should stall if the allocator has been disabled.
  EXPECT_DEATH(
      {
        malloc_disable();
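        // As in MallocDisableDeadlock, the SIGALRM terminates the stalled
        // process so that EXPECT_DEATH sees it die.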
        alarm(1);
        Pid = fork();
        EXPECT_GE(Pid, 0);
      },
      "");
}

static pthread_mutex_t Mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t Conditional = PTHREAD_COND_INITIALIZER;

static void *enableMalloc(void *Unused) {
  // Initialize the allocator for this thread.
  void *P = malloc(Size);
  EXPECT_NE(P, nullptr);
  memset(P, 0x42, Size);
  free(P);

  // Signal the main thread that we are ready.
  pthread_mutex_lock(&Mutex);
  pthread_cond_signal(&Conditional);
  pthread_mutex_unlock(&Mutex);

  // Wait for the malloc_disable() & fork(), then enable the allocator again.
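  // Note that this synchronization is purely time-based: one second is
  // assumed to be long enough for the main thread to disable the allocator
  // and reach fork().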
  sleep(1);
  malloc_enable();

  return nullptr;
}

TEST(ScudoWrappersCTest, DisableForkEnable) {
  pthread_t ThreadId;
  EXPECT_EQ(pthread_create(&ThreadId, nullptr, &enableMalloc, nullptr), 0);

  // Wait for the thread to be warmed up.
  pthread_mutex_lock(&Mutex);
  pthread_cond_wait(&Conditional, &Mutex);
  pthread_mutex_unlock(&Mutex);

  // Disable the allocator and fork. fork should succeed after malloc_enable.
  malloc_disable();
  pid_t Pid = fork();
  EXPECT_GE(Pid, 0);
  if (Pid == 0) {
    void *P = malloc(Size);
    EXPECT_NE(P, nullptr);
    memset(P, 0x42, Size);
    free(P);
    _exit(0);
  }
  waitpid(Pid, nullptr, 0);
  EXPECT_EQ(pthread_join(ThreadId, nullptr), 0);
}

#endif // SCUDO_FUCHSIA