//===-- wrappers_c_test.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "memtag.h"
#include "scudo/interface.h"
#include "tests/scudo_unit_test.h"

#include <errno.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

extern "C" {
void malloc_enable(void);
void malloc_disable(void);
int malloc_iterate(uintptr_t base, size_t size,
                   void (*callback)(uintptr_t base, size_t size, void *arg),
                   void *arg);
void *valloc(size_t size);
void *pvalloc(size_t size);
}
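
// malloc_enable(), malloc_disable() and malloc_iterate() are Bionic-style
// extensions provided by Scudo's C wrappers, and valloc()/pvalloc() are
// legacy interfaces; they are not declared in <malloc.h> on every platform,
// hence the explicit prototypes above.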

// Note that every C allocation function in the test binary will be fulfilled
// by Scudo (this includes the gtest APIs, etc.), which is a test in itself.
// But this might also lead to unexpected side-effects, since the allocation
// and deallocation operations in the TEST functions will coexist with others
// (see the EXPECT_DEATH comment below).

// We have to use a small quarantine to make sure that our double-free tests
// trigger. Otherwise EXPECT_DEATH ends up reallocating the chunk that was
// just freed (this obviously depends on the size) and the following free
// succeeds.
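//
// A minimal, hypothetical sketch of how a test binary can pin down such a
// small quarantine, assuming it defines __scudo_default_options() (the weak
// function Scudo queries for its runtime flags):
//
//   extern "C" const char *__scudo_default_options() {
//     return "quarantine_size_kb=256:thread_local_quarantine_size_kb=128:"
//            "quarantine_max_chunk_size=512";
//   }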

static const size_t Size = 100U;

TEST(ScudoWrappersCDeathTest, Malloc) {
  void *P = malloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % FIRST_32_SECOND_64(8U, 16U), 0U);

  // An update to this warning in Clang now makes it trigger on the following
  // line, which is fine: the check passes a deliberately bad pointer and is
  // expected to fail.
#if defined(__has_warning) && __has_warning("-Wfree-nonheap-object")
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfree-nonheap-object"
#endif
  EXPECT_DEATH(
      free(reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(P) | 1U)), "");
#if defined(__has_warning) && __has_warning("-Wfree-nonheap-object")
#pragma GCC diagnostic pop
#endif

  free(P);
  EXPECT_DEATH(free(P), "");

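  // Even a zero-byte request must yield a valid, unique pointer that can be
  // passed to free().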
  P = malloc(0U);
  EXPECT_NE(P, nullptr);
  free(P);

  errno = 0;
  EXPECT_EQ(malloc(SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
}

TEST(ScudoWrappersCTest, Calloc) {
  void *P = calloc(1U, Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  for (size_t I = 0; I < Size; I++)
    EXPECT_EQ((reinterpret_cast<uint8_t *>(P))[I], 0U);
  free(P);

  P = calloc(1U, 0U);
  EXPECT_NE(P, nullptr);
  free(P);
  P = calloc(0U, 1U);
  EXPECT_NE(P, nullptr);
  free(P);

  errno = 0;
  EXPECT_EQ(calloc(SIZE_MAX, 1U), nullptr);
  EXPECT_EQ(errno, ENOMEM);
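  // (LONG_MAX + 1) * 2 wraps around to 0 in size_t arithmetic, so calloc has
  // to detect the multiplication overflow explicitly rather than trust the
  // computed product.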
  errno = 0;
  EXPECT_EQ(calloc(static_cast<size_t>(LONG_MAX) + 1U, 2U), nullptr);
  if (SCUDO_ANDROID)
    EXPECT_EQ(errno, ENOMEM);
  errno = 0;
  EXPECT_EQ(calloc(SIZE_MAX, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
}

TEST(ScudoWrappersCTest, SmallAlign) {
  void *P;
  for (size_t Size = 1; Size <= 0x10000; Size <<= 1) {
    for (size_t Align = 1; Align <= 0x10000; Align <<= 1) {
      for (size_t Count = 0; Count < 3; ++Count) {
        P = memalign(Align, Size);
        EXPECT_TRUE(reinterpret_cast<uintptr_t>(P) % Align == 0);
        // Free the chunk so the test doesn't leak the several hundred
        // allocations it makes.
        free(P);
      }
    }
  }
}

TEST(ScudoWrappersCTest, Memalign) {
  void *P;
  for (size_t I = FIRST_32_SECOND_64(2U, 3U); I <= 18U; I++) {
    const size_t Alignment = 1U << I;

    P = memalign(Alignment, Size);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    free(P);

    P = nullptr;
    EXPECT_EQ(posix_memalign(&P, Alignment, Size), 0);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    free(P);
  }

  EXPECT_EQ(memalign(4096U, SIZE_MAX), nullptr);
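  // posix_memalign requires the alignment to be a power of two and a multiple
  // of sizeof(void *), so 15 must be rejected with EINVAL.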
  EXPECT_EQ(posix_memalign(&P, 15U, Size), EINVAL);
  EXPECT_EQ(posix_memalign(&P, 4096U, SIZE_MAX), ENOMEM);

  // Android's memalign accepts non-power-of-two alignments, as well as 0.
  if (SCUDO_ANDROID) {
    for (size_t Alignment = 0U; Alignment <= 128U; Alignment++) {
      P = memalign(Alignment, 1024U);
      EXPECT_NE(P, nullptr);
      free(P);
    }
  }
}

TEST(ScudoWrappersCTest, AlignedAlloc) {
  const size_t Alignment = 4096U;
  void *P = aligned_alloc(Alignment, Alignment * 4U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Alignment * 4U, malloc_usable_size(P));
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
  free(P);

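  // C11 requires the size to be a multiple of the alignment; Size (100) is
  // not, so the call is expected to fail with EINVAL.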
  errno = 0;
  P = aligned_alloc(Alignment, Size);
  EXPECT_EQ(P, nullptr);
  EXPECT_EQ(errno, EINVAL);
}

TEST(ScudoWrappersCDeathTest, Realloc) {
  // realloc(nullptr, N) behaves like malloc(N).
  void *P = realloc(nullptr, 0U);
  EXPECT_NE(P, nullptr);
  free(P);

  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  // realloc(P, 0U) acts as free(P) and returns nullptr.
  EXPECT_EQ(realloc(P, 0U), nullptr);

  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  memset(P, 0x42, Size);

  P = realloc(P, Size * 2U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size * 2U, malloc_usable_size(P));
  for (size_t I = 0; I < Size; I++)
    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);

  P = realloc(P, Size / 2U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size / 2U, malloc_usable_size(P));
  for (size_t I = 0; I < Size / 2U; I++)
    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
  free(P);

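  // Reallocating a chunk that was just freed is a use-after-free that the
  // allocator is expected to detect and abort on.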
  EXPECT_DEATH(P = realloc(P, Size), "");

  errno = 0;
  EXPECT_EQ(realloc(nullptr, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  errno = 0;
  EXPECT_EQ(realloc(P, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  free(P);

  // Android allows realloc of memalign pointers.
  if (SCUDO_ANDROID) {
    const size_t Alignment = 1024U;
    P = memalign(Alignment, Size);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    memset(P, 0x42, Size);

    P = realloc(P, Size * 2U);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size * 2U, malloc_usable_size(P));
    for (size_t I = 0; I < Size; I++)
      EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
    free(P);
  }
}

#if !SCUDO_FUCHSIA
TEST(ScudoWrappersCTest, MallOpt) {
  errno = 0;
  EXPECT_EQ(mallopt(-1000, 1), 0);
  // mallopt doesn't set errno.
  EXPECT_EQ(errno, 0);

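  // M_PURGE forces the allocator to release unused cached memory back to the
  // OS immediately.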
  EXPECT_EQ(mallopt(M_PURGE, 0), 1);

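  // M_DECAY_TIME adjusts how eagerly freed memory is released to the OS
  // (Bionic semantics: 0 releases eagerly, nonzero defers the release); the
  // test only checks that the option is accepted.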
  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);

  if (SCUDO_ANDROID) {
    EXPECT_EQ(mallopt(M_CACHE_COUNT_MAX, 100), 1);
    EXPECT_EQ(mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2), 1);
    EXPECT_EQ(mallopt(M_TSDS_COUNT_MAX, 10), 1);
  }
}
#endif

TEST(ScudoWrappersCTest, OtherAlloc) {
#if !SCUDO_FUCHSIA
  const size_t PageSize = sysconf(_SC_PAGESIZE);

  void *P = pvalloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
  EXPECT_LE(PageSize, malloc_usable_size(P));
  free(P);

  EXPECT_EQ(pvalloc(SIZE_MAX), nullptr);

  P = pvalloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
  free(P);
#endif

  EXPECT_EQ(valloc(SIZE_MAX), nullptr);
}

#if !SCUDO_FUCHSIA
TEST(ScudoWrappersCTest, MallInfo) {
  const size_t BypassQuarantineSize = 1024U;
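  // 1024 bytes is above the quarantine's maximum chunk size as configured for
  // the tests, so the chunk is recycled as soon as it is freed: its bytes show
  // up immediately in fordblks (free bytes), while uordblks tracks in-use
  // bytes and hblkhd the mapped heap space.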

  struct mallinfo MI = mallinfo();
  size_t Allocated = MI.uordblks;
  void *P = malloc(BypassQuarantineSize);
  EXPECT_NE(P, nullptr);
  MI = mallinfo();
  EXPECT_GE(static_cast<size_t>(MI.uordblks), Allocated + BypassQuarantineSize);
  EXPECT_GT(static_cast<size_t>(MI.hblkhd), 0U);
  size_t Free = MI.fordblks;
  free(P);
  MI = mallinfo();
  EXPECT_GE(static_cast<size_t>(MI.fordblks), Free + BypassQuarantineSize);
}
#endif

static uintptr_t BoundaryP;
static size_t Count;

static void callback(uintptr_t Base, size_t Size, void *Arg) {
  if (scudo::archSupportsMemoryTagging()) {
    Base = scudo::untagPointer(Base);
    BoundaryP = scudo::untagPointer(BoundaryP);
  }
  if (Base == BoundaryP)
    Count++;
}

// Verify that a block located on an iteration boundary is not mis-accounted.
// To achieve this, we allocate a chunk whose backing block is aligned on a
// page boundary, then run malloc_iterate on both of the pages that the block
// borders. The callback must see the chunk exactly once.
TEST(ScudoWrappersCTest, MallocIterateBoundary) {
  const size_t PageSize = sysconf(_SC_PAGESIZE);
  const size_t BlockDelta = FIRST_32_SECOND_64(8U, 16U);
  const size_t SpecialSize = PageSize - BlockDelta;

  // We aren't guaranteed that any size class is exactly a page wide. So we
  // need to keep making allocations until we succeed.
  //
  // With a 16-byte block alignment and 4096-byte page size, each allocation
  // has a probability of (1 - (16/4096)) of failing to meet the alignment
  // requirements, and the probability of failing 65536 times is
  // (1 - (16/4096))^65536 < 10^-112. So if we still haven't succeeded after
  // 65536 tries, give up.
  uintptr_t Block;
  void *P = nullptr;
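  // Chain each allocation through its own first bytes so that the whole set
  // can be walked and freed once the boundary block has been found.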
  for (unsigned I = 0; I != 65536; ++I) {
    void *PrevP = P;
    P = malloc(SpecialSize);
    EXPECT_NE(P, nullptr);
    *reinterpret_cast<void **>(P) = PrevP;
    BoundaryP = reinterpret_cast<uintptr_t>(P);
    Block = BoundaryP - BlockDelta;
    if ((Block & (PageSize - 1)) == 0U)
      break;
  }
  EXPECT_EQ((Block & (PageSize - 1)), 0U);

  Count = 0U;
  malloc_disable();
  malloc_iterate(Block - PageSize, PageSize, callback, nullptr);
  malloc_iterate(Block, PageSize, callback, nullptr);
  malloc_enable();
  EXPECT_EQ(Count, 1U);

  while (P) {
    void *NextP = *reinterpret_cast<void **>(P);
    free(P);
    P = NextP;
  }
}

// Fuchsia doesn't have alarm, fork, or malloc_info.
#if !SCUDO_FUCHSIA
TEST(ScudoWrappersCDeathTest, MallocDisableDeadlock) {
  // We expect heap operations within a disable/enable scope to deadlock.
  EXPECT_DEATH(
      {
        void *P = malloc(Size);
        EXPECT_NE(P, nullptr);
        free(P);
        malloc_disable();
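        // If the malloc below deadlocks, SIGALRM fires after one second and
        // kills the process, satisfying EXPECT_DEATH.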
        alarm(1);
        P = malloc(Size);
        malloc_enable();
      },
      "");
}

TEST(ScudoWrappersCTest, MallocInfo) {
  // Use volatile so that the allocations don't get optimized away.
  void *volatile P1 = malloc(1234);
  void *volatile P2 = malloc(4321);

  char Buffer[16384];
  FILE *F = fmemopen(Buffer, sizeof(Buffer), "w+");
  EXPECT_NE(F, nullptr);
  errno = 0;
  EXPECT_EQ(malloc_info(0, F), 0);
  EXPECT_EQ(errno, 0);
  fclose(F);
  EXPECT_EQ(strncmp(Buffer, "<malloc version=\"scudo-", 23), 0);
  EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"1234\" count=\""));
  EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"4321\" count=\""));

  free(P1);
  free(P2);
}

TEST(ScudoWrappersCDeathTest, Fork) {
  void *P;
  pid_t Pid = fork();
  EXPECT_GE(Pid, 0) << strerror(errno);
  if (Pid == 0) {
    P = malloc(Size);
    EXPECT_NE(P, nullptr);
    memset(P, 0x42, Size);
    free(P);
    _exit(0);
  }
  waitpid(Pid, nullptr, 0);
  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  memset(P, 0x42, Size);
  free(P);

  // fork should stall if the allocator has been disabled.
  EXPECT_DEATH(
      {
        malloc_disable();
        alarm(1);
        Pid = fork();
        EXPECT_GE(Pid, 0);
      },
      "");
}

static pthread_mutex_t Mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t Conditional = PTHREAD_COND_INITIALIZER;
static bool Ready;

static void *enableMalloc(void *Unused) {
  // Initialize the allocator for this thread.
  void *P = malloc(Size);
  EXPECT_NE(P, nullptr);
  memset(P, 0x42, Size);
  free(P);

  // Signal the main thread that we are ready.
  pthread_mutex_lock(&Mutex);
  Ready = true;
  pthread_cond_signal(&Conditional);
  pthread_mutex_unlock(&Mutex);

  // Wait for the main thread's malloc_disable and fork, then re-enable the
  // allocator.
  sleep(1);
  malloc_enable();

  return nullptr;
}

TEST(ScudoWrappersCTest, DisableForkEnable) {
  pthread_t ThreadId;
  Ready = false;
  EXPECT_EQ(pthread_create(&ThreadId, nullptr, &enableMalloc, nullptr), 0);

  // Wait for the thread to be warmed up.
  pthread_mutex_lock(&Mutex);
  while (!Ready)
    pthread_cond_wait(&Conditional, &Mutex);
  pthread_mutex_unlock(&Mutex);

  // Disable the allocator and fork. fork should succeed after malloc_enable.
  malloc_disable();
  pid_t Pid = fork();
  EXPECT_GE(Pid, 0);
  if (Pid == 0) {
    void *P = malloc(Size);
    EXPECT_NE(P, nullptr);
    memset(P, 0x42, Size);
    free(P);
    _exit(0);
  }
  waitpid(Pid, nullptr, 0);
  EXPECT_EQ(pthread_join(ThreadId, nullptr), 0);
}

#endif // SCUDO_FUCHSIA