//===-- wrappers_c_test.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "memtag.h"
#include "scudo/interface.h"
#include "tests/scudo_unit_test.h"

#include <errno.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

#include <vector>

#ifndef __GLIBC_PREREQ
#define __GLIBC_PREREQ(x, y) 0
#endif

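// malloc_enable/malloc_disable/malloc_iterate are Bionic-style extensions and
// valloc/pvalloc are legacy interfaces; not every libc declares them in its
// headers, so declare the Scudo-provided symbols manually.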
extern "C" {
void malloc_enable(void);
void malloc_disable(void);
int malloc_iterate(uintptr_t base, size_t size,
                   void (*callback)(uintptr_t base, size_t size, void *arg),
                   void *arg);
void *valloc(size_t size);
void *pvalloc(size_t size);
}

// Note that every C allocation function in the test binary will be fulfilled
// by Scudo (this includes the gtest APIs, etc.), which is a test in itself.
// But this might also lead to unexpected side-effects, since the allocation
// and deallocation operations in the TEST functions will coexist with others
// (see the EXPECT_DEATH comment below).

// We have to use a small quarantine to make sure that our double-free tests
// trigger. Otherwise EXPECT_DEATH ends up reallocating the chunk that was
// just freed (depending on its size) and the subsequent free succeeds.

static const size_t Size = 100U;

TEST(ScudoWrappersCDeathTest, Malloc) {
  void *P = malloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % FIRST_32_SECOND_64(8U, 16U), 0U);

  // A Clang update taught -Wfree-nonheap-object to trigger on this line. That
  // is expected: the call deliberately passes a bad pointer and should die.
#if defined(__has_warning) && __has_warning("-Wfree-nonheap-object")
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfree-nonheap-object"
#endif
  EXPECT_DEATH(
      free(reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(P) | 1U)), "");
#if defined(__has_warning) && __has_warning("-Wfree-nonheap-object")
#pragma GCC diagnostic pop
#endif

  free(P);
  EXPECT_DEATH(free(P), "");

  P = malloc(0U);
  EXPECT_NE(P, nullptr);
  free(P);

  errno = 0;
  EXPECT_EQ(malloc(SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
}

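// calloc must return zeroed memory and must fail with ENOMEM when the
// count * size computation overflows.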
TEST(ScudoWrappersCTest, Calloc) {
  void *P = calloc(1U, Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  for (size_t I = 0; I < Size; I++)
    EXPECT_EQ((reinterpret_cast<uint8_t *>(P))[I], 0U);
  free(P);

  P = calloc(1U, 0U);
  EXPECT_NE(P, nullptr);
  free(P);
  P = calloc(0U, 1U);
  EXPECT_NE(P, nullptr);
  free(P);

  errno = 0;
  EXPECT_EQ(calloc(SIZE_MAX, 1U), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  errno = 0;
  EXPECT_EQ(calloc(static_cast<size_t>(LONG_MAX) + 1U, 2U), nullptr);
  if (SCUDO_ANDROID)
    EXPECT_EQ(errno, ENOMEM);
  errno = 0;
  EXPECT_EQ(calloc(SIZE_MAX, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
}

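// Sweep memalign over a grid of power-of-two sizes and alignments; every
// returned pointer must honor the requested alignment.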
TEST(ScudoWrappersCTest, SmallAlign) {
  std::vector<void *> Ptrs;
  for (size_t Size = 1; Size <= 0x10000; Size <<= 1) {
    for (size_t Align = 1; Align <= 0x10000; Align <<= 1) {
      for (size_t Count = 0; Count < 3; ++Count) {
        void *P = memalign(Align, Size);
        EXPECT_TRUE(reinterpret_cast<uintptr_t>(P) % Align == 0);
        Ptrs.push_back(P);
      }
    }
  }
  // Free the allocations so the test doesn't leak.
  for (void *P : Ptrs)
    free(P);
}

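// memalign and posix_memalign must honor power-of-two alignments up to 2^18,
// and reject invalid alignments (EINVAL) or overlarge sizes (ENOMEM).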
TEST(ScudoWrappersCTest, Memalign) {
  void *P;
  for (size_t I = FIRST_32_SECOND_64(2U, 3U); I <= 18U; I++) {
    const size_t Alignment = 1U << I;

    P = memalign(Alignment, Size);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    free(P);

    P = nullptr;
    EXPECT_EQ(posix_memalign(&P, Alignment, Size), 0);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    free(P);
  }

  EXPECT_EQ(memalign(4096U, SIZE_MAX), nullptr);
  EXPECT_EQ(posix_memalign(&P, 15U, Size), EINVAL);
  EXPECT_EQ(posix_memalign(&P, 4096U, SIZE_MAX), ENOMEM);

  // Android's memalign accepts non-power-of-2 alignments, and 0.
  if (SCUDO_ANDROID) {
    for (size_t Alignment = 0U; Alignment <= 128U; Alignment++) {
      P = memalign(Alignment, 1024U);
      EXPECT_NE(P, nullptr);
      free(P);
    }
  }
}

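// C11 aligned_alloc requires the size to be a multiple of the alignment, so a
// 100-byte request with 4096-byte alignment must fail with EINVAL.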
TEST(ScudoWrappersCTest, AlignedAlloc) {
  const size_t Alignment = 4096U;
  void *P = aligned_alloc(Alignment, Alignment * 4U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Alignment * 4U, malloc_usable_size(P));
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
  free(P);

  errno = 0;
  P = aligned_alloc(Alignment, Size);
  EXPECT_EQ(P, nullptr);
  EXPECT_EQ(errno, EINVAL);
}

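// Covers realloc's special cases (nullptr source, zero size), content
// preservation when growing and shrinking, and ENOMEM on impossible sizes.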
TEST(ScudoWrappersCDeathTest, Realloc) {
  // realloc(nullptr, N) is malloc(N).
  void *P = realloc(nullptr, 0U);
  EXPECT_NE(P, nullptr);
  free(P);

  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  // realloc(P, 0U) is free(P) and returns nullptr.
  EXPECT_EQ(realloc(P, 0U), nullptr);

  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  memset(P, 0x42, Size);

  P = realloc(P, Size * 2U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size * 2U, malloc_usable_size(P));
  for (size_t I = 0; I < Size; I++)
    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);

  P = realloc(P, Size / 2U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size / 2U, malloc_usable_size(P));
  for (size_t I = 0; I < Size / 2U; I++)
    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
  free(P);

  EXPECT_DEATH(P = realloc(P, Size), "");

  errno = 0;
  EXPECT_EQ(realloc(nullptr, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  errno = 0;
  EXPECT_EQ(realloc(P, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  free(P);

  // Android allows realloc of memalign pointers.
  if (SCUDO_ANDROID) {
    const size_t Alignment = 1024U;
    P = memalign(Alignment, Size);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    memset(P, 0x42, Size);

    P = realloc(P, Size * 2U);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size * 2U, malloc_usable_size(P));
    for (size_t I = 0; I < Size; I++)
      EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
    free(P);
  }
}

#if !SCUDO_FUCHSIA
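// mallopt returns 1 on success and 0 on failure; an unknown parameter must
// fail without touching errno.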
TEST(ScudoWrappersCTest, MallOpt) {
  errno = 0;
  EXPECT_EQ(mallopt(-1000, 1), 0);
  // mallopt doesn't set errno.
  EXPECT_EQ(errno, 0);

  EXPECT_EQ(mallopt(M_PURGE, 0), 1);

  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);

  if (SCUDO_ANDROID) {
    EXPECT_EQ(mallopt(M_CACHE_COUNT_MAX, 100), 1);
    EXPECT_EQ(mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2), 1);
    EXPECT_EQ(mallopt(M_TSDS_COUNT_MAX, 10), 1);
  }
}
#endif

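// valloc returns page-aligned memory; pvalloc additionally rounds the size up
// to a whole number of pages.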
TEST(ScudoWrappersCTest, OtherAlloc) {
#if !SCUDO_FUCHSIA
  const size_t PageSize = sysconf(_SC_PAGESIZE);

  void *P = pvalloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
  EXPECT_LE(PageSize, malloc_usable_size(P));
  free(P);

  EXPECT_EQ(pvalloc(SIZE_MAX), nullptr);

  P = pvalloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
  free(P);
#endif

  EXPECT_EQ(valloc(SIZE_MAX), nullptr);
}

#if !SCUDO_FUCHSIA
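// mallinfo's uordblks/fordblks counters should reflect an allocation that is
// large enough to bypass the quarantine.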
TEST(ScudoWrappersCTest, MallInfo) {
  // mallinfo is deprecated.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
  const size_t BypassQuarantineSize = 1024U;
  struct mallinfo MI = mallinfo();
  size_t Allocated = MI.uordblks;
  void *P = malloc(BypassQuarantineSize);
  EXPECT_NE(P, nullptr);
  MI = mallinfo();
  EXPECT_GE(static_cast<size_t>(MI.uordblks), Allocated + BypassQuarantineSize);
  EXPECT_GT(static_cast<size_t>(MI.hblkhd), 0U);
  size_t Free = MI.fordblks;
  free(P);
  MI = mallinfo();
  EXPECT_GE(static_cast<size_t>(MI.fordblks), Free + BypassQuarantineSize);
#pragma clang diagnostic pop
}
#endif

#if __GLIBC_PREREQ(2, 33)
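// glibc 2.33 introduced mallinfo2, which widens the mallinfo fields to
// size_t; these are the same accounting checks as above, minus the casts.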
TEST(ScudoWrappersCTest, MallInfo2) {
  const size_t BypassQuarantineSize = 1024U;
  struct mallinfo2 MI = mallinfo2();
  size_t Allocated = MI.uordblks;
  void *P = malloc(BypassQuarantineSize);
  EXPECT_NE(P, nullptr);
  MI = mallinfo2();
  EXPECT_GE(MI.uordblks, Allocated + BypassQuarantineSize);
  EXPECT_GT(MI.hblkhd, 0U);
  size_t Free = MI.fordblks;
  free(P);
  MI = mallinfo2();
  EXPECT_GE(MI.fordblks, Free + BypassQuarantineSize);
}
#endif

static uintptr_t BoundaryP;
static size_t Count;

static void callback(uintptr_t Base, size_t Size, void *Arg) {
  if (scudo::archSupportsMemoryTagging()) {
    Base = scudo::untagPointer(Base);
    BoundaryP = scudo::untagPointer(BoundaryP);
  }
  if (Base == BoundaryP)
    Count++;
}

// Verify that a block located on an iteration boundary is not mis-accounted.
// To achieve this, we allocate a chunk whose backing block is aligned on a
// page boundary, then run malloc_iterate over both pages that the block
// borders. The callback must see the chunk exactly once.
TEST(ScudoWrappersCTest, MallocIterateBoundary) {
  const size_t PageSize = sysconf(_SC_PAGESIZE);
  const size_t BlockDelta = FIRST_32_SECOND_64(8U, 16U);
  const size_t SpecialSize = PageSize - BlockDelta;

  // We aren't guaranteed that any size class is exactly a page wide. So we
  // need to keep making allocations until we succeed.
  //
  // With a 16-byte block alignment and 4096-byte page size, each allocation
  // has a probability of (1 - (16/4096)) of failing to meet the alignment
  // requirements, and the probability of failing 65536 times in a row is
  // (1 - (16/4096))^65536, or about 10^-111. So if we still haven't succeeded
  // after 65536 tries, give up.
  uintptr_t Block;
  void *P = nullptr;
  for (unsigned I = 0; I != 65536; ++I) {
    void *PrevP = P;
    P = malloc(SpecialSize);
    EXPECT_NE(P, nullptr);
    *reinterpret_cast<void **>(P) = PrevP;
    BoundaryP = reinterpret_cast<uintptr_t>(P);
    Block = BoundaryP - BlockDelta;
    if ((Block & (PageSize - 1)) == 0U)
      break;
  }
  EXPECT_EQ((Block & (PageSize - 1)), 0U);

  Count = 0U;
  malloc_disable();
  malloc_iterate(Block - PageSize, PageSize, callback, nullptr);
  malloc_iterate(Block, PageSize, callback, nullptr);
  malloc_enable();
  EXPECT_EQ(Count, 1U);

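  // Free the chain of allocations made while hunting for the page boundary.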
  while (P) {
    void *NextP = *reinterpret_cast<void **>(P);
    free(P);
    P = NextP;
  }
}

// Fuchsia doesn't have alarm, fork, or malloc_info.
#if !SCUDO_FUCHSIA
TEST(ScudoWrappersCDeathTest, MallocDisableDeadlock) {
  // We expect heap operations within a disable/enable scope to deadlock.
  EXPECT_DEATH(
      {
        void *P = malloc(Size);
        EXPECT_NE(P, nullptr);
        free(P);
        malloc_disable();
        alarm(1);
        P = malloc(Size);
        malloc_enable();
      },
      "");
}

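// malloc_info writes an XML report to the given stream. Judging only from the
// checks below, the output is shaped roughly like this (counts vary):
//
//   <malloc version="scudo-...">
//     <alloc size="1234" count="..."/>
//     ...
//   </malloc>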
TEST(ScudoWrappersCTest, MallocInfo) {
  // Use volatile so that the allocations don't get optimized away.
  void *volatile P1 = malloc(1234);
  void *volatile P2 = malloc(4321);

  char Buffer[16384];
  FILE *F = fmemopen(Buffer, sizeof(Buffer), "w+");
  EXPECT_NE(F, nullptr);
  errno = 0;
  EXPECT_EQ(malloc_info(0, F), 0);
  EXPECT_EQ(errno, 0);
  fclose(F);
  EXPECT_EQ(strncmp(Buffer, "<malloc version=\"scudo-", 23), 0);
  EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"1234\" count=\""));
  EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"4321\" count=\""));

  free(P1);
  free(P2);
}

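// The allocator must keep working in both the parent and the child across a
// fork. A fork while the allocator is disabled is expected to stall, and the
// alarm turns that stall into a death for EXPECT_DEATH.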
TEST(ScudoWrappersCDeathTest, Fork) {
  void *P;
  pid_t Pid = fork();
  EXPECT_GE(Pid, 0) << strerror(errno);
  if (Pid == 0) {
    P = malloc(Size);
    EXPECT_NE(P, nullptr);
    memset(P, 0x42, Size);
    free(P);
    _exit(0);
  }
  waitpid(Pid, nullptr, 0);
  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  memset(P, 0x42, Size);
  free(P);

  // fork should stall if the allocator has been disabled.
  EXPECT_DEATH(
      {
        malloc_disable();
        alarm(1);
        Pid = fork();
        EXPECT_GE(Pid, 0);
      },
      "");
}

static pthread_mutex_t Mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t Conditional = PTHREAD_COND_INITIALIZER;
static bool Ready;

static void *enableMalloc(void *Unused) {
  // Initialize the allocator for this thread.
  void *P = malloc(Size);
  EXPECT_NE(P, nullptr);
  memset(P, 0x42, Size);
  free(P);

  // Signal the main thread we are ready.
  pthread_mutex_lock(&Mutex);
  Ready = true;
  pthread_cond_signal(&Conditional);
  pthread_mutex_unlock(&Mutex);

  // Wait for the malloc_disable & fork, then enable the allocator again.
  sleep(1);
  malloc_enable();

  return nullptr;
}

TEST(ScudoWrappersCTest, DisableForkEnable) {
  pthread_t ThreadId;
  Ready = false;
  EXPECT_EQ(pthread_create(&ThreadId, nullptr, &enableMalloc, nullptr), 0);

  // Wait for the thread to be warmed up.
  pthread_mutex_lock(&Mutex);
  while (!Ready)
    pthread_cond_wait(&Conditional, &Mutex);
  pthread_mutex_unlock(&Mutex);

  // Disable the allocator and fork. fork should succeed after malloc_enable.
  malloc_disable();
  pid_t Pid = fork();
  EXPECT_GE(Pid, 0);
  if (Pid == 0) {
    void *P = malloc(Size);
    EXPECT_NE(P, nullptr);
    memset(P, 0x42, Size);
    free(P);
    _exit(0);
  }
  waitpid(Pid, nullptr, 0);
  EXPECT_EQ(pthread_join(ThreadId, nullptr), 0);
}

#endif // SCUDO_FUCHSIA