xref: /freebsd-src/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp (revision 68d75eff68281c1b445e3010bb975eae07aac225)
1*68d75effSDimitry Andric //=-- lsan_allocator.cpp --------------------------------------------------===//
2*68d75effSDimitry Andric //
3*68d75effSDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4*68d75effSDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
5*68d75effSDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6*68d75effSDimitry Andric //
7*68d75effSDimitry Andric //===----------------------------------------------------------------------===//
8*68d75effSDimitry Andric //
9*68d75effSDimitry Andric // This file is a part of LeakSanitizer.
10*68d75effSDimitry Andric // See lsan_allocator.h for details.
11*68d75effSDimitry Andric //
12*68d75effSDimitry Andric //===----------------------------------------------------------------------===//
13*68d75effSDimitry Andric 
14*68d75effSDimitry Andric #include "lsan_allocator.h"
15*68d75effSDimitry Andric 
16*68d75effSDimitry Andric #include "sanitizer_common/sanitizer_allocator.h"
17*68d75effSDimitry Andric #include "sanitizer_common/sanitizer_allocator_checks.h"
18*68d75effSDimitry Andric #include "sanitizer_common/sanitizer_allocator_interface.h"
19*68d75effSDimitry Andric #include "sanitizer_common/sanitizer_allocator_report.h"
20*68d75effSDimitry Andric #include "sanitizer_common/sanitizer_errno.h"
21*68d75effSDimitry Andric #include "sanitizer_common/sanitizer_internal_defs.h"
22*68d75effSDimitry Andric #include "sanitizer_common/sanitizer_stackdepot.h"
23*68d75effSDimitry Andric #include "sanitizer_common/sanitizer_stacktrace.h"
24*68d75effSDimitry Andric #include "lsan_common.h"
25*68d75effSDimitry Andric 
26*68d75effSDimitry Andric extern "C" void *memset(void *ptr, int value, uptr num);
27*68d75effSDimitry Andric 
28*68d75effSDimitry Andric namespace __lsan {
// Per-architecture cap on a single allocation request; anything larger is
// rejected via ReportAllocationSizeTooBig().
#if defined(__i386__) || defined(__arm__)
// 32-bit targets: 1 GB.
static const uptr kMaxAllowedMallocSize = 1UL << 30;
#elif defined(__mips64) || defined(__aarch64__)
// mips64 / aarch64: 4 GB.
static const uptr kMaxAllowedMallocSize = 4UL << 30;
#else
// All other (64-bit) targets: 8 GB.
static const uptr kMaxAllowedMallocSize = 8UL << 30;
#endif

// The single global allocator instance.  It is a global with static storage
// duration (zero-initialized) and is set up by InitializeAllocator().
static Allocator allocator;
38*68d75effSDimitry Andric 
// One-time allocator setup: applies the may_return_null flag and initializes
// the (linker-initialized) global allocator with the configured
// release-to-OS interval.
void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
}
44*68d75effSDimitry Andric 
// Called on thread exit: returns the thread-local allocator cache contents
// to the global allocator so no memory is stranded in the dead thread.
void AllocatorThreadFinish() {
  allocator.SwallowCache(GetAllocatorCache());
}
48*68d75effSDimitry Andric 
// Returns the ChunkMetadata the allocator stores alongside chunk `p`, or
// null when `p` is not an allocator-owned pointer (see
// __sanitizer_get_ownership, which relies on that null).
static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}
52*68d75effSDimitry Andric 
// Records a new allocation for leak tracking: tag, allocation stack id and
// requested size.  No-op for a null pointer (failed allocation).
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  // Allocations made while leak detection is disabled in this thread are
  // tagged kIgnored so they are never reported.
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  // Publish the chunk last: the atomic store writes the first byte of
  // ChunkMetadata (presumably the `allocated` flag — it mirrors
  // RegisterDeallocation's store of 0) only after all other fields are set.
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}
62*68d75effSDimitry Andric 
// Marks the chunk at `p` as no longer allocated by atomically clearing the
// first byte of its metadata.  No-op for a null pointer.
static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}
69*68d75effSDimitry Andric 
// Handles a request exceeding kMaxAllowedMallocSize: returns null when the
// allocator is allowed to fail, otherwise reports a fatal error.
static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
  if (AllocatorMayReturnNull()) {
    Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
    return nullptr;
  }
  // The three-argument overload never returns, hence no return statement
  // after it.
  ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, &stack);
}
77*68d75effSDimitry Andric 
// Central allocation path: enforces the size cap, allocates from the
// per-thread cache, optionally zeroes the memory, registers the chunk for
// leak tracking, then runs malloc hooks.  Returns null only when the
// allocator is configured to tolerate failure.
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  // malloc(0) must still yield a unique, freeable pointer.
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize)
    return ReportAllocationSizeTooBig(size, stack);
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    // Does not return.
    ReportOutOfMemory(size, &stack);
  }
  // Do not rely on the allocator to clear the memory (it's slow).
  // Only primary chunks are memset here; secondary chunks are skipped,
  // presumably because they arrive already zeroed — confirm upstream.
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  // Invoke both the weak interface hook (when present) and installed hooks.
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  RunMallocHooks(p, size);
  return p;
}
99*68d75effSDimitry Andric 
100*68d75effSDimitry Andric static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
101*68d75effSDimitry Andric   if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
102*68d75effSDimitry Andric     if (AllocatorMayReturnNull())
103*68d75effSDimitry Andric       return nullptr;
104*68d75effSDimitry Andric     ReportCallocOverflow(nmemb, size, &stack);
105*68d75effSDimitry Andric   }
106*68d75effSDimitry Andric   size *= nmemb;
107*68d75effSDimitry Andric   return Allocate(stack, size, 1, true);
108*68d75effSDimitry Andric }
109*68d75effSDimitry Andric 
// Frees `p`: runs free hooks first, then unregisters the chunk from leak
// tracking, and finally returns the memory to the per-thread cache.
void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RunFreeHooks(p);
  RegisterDeallocation(p);
  allocator.Deallocate(GetAllocatorCache(), p);
}
116*68d75effSDimitry Andric 
// realloc() backend.  The chunk is unregistered up front because
// allocator.Reallocate may move or free the old block.
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    // Oversized request: free the old block and report/return-null.
    allocator.Deallocate(GetAllocatorCache(), p);
    return ReportAllocationSizeTooBig(new_size, stack);
  }
  p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
  // RegisterAllocation is a no-op on null, so a failed reallocation leaves
  // nothing registered.
  // NOTE(review): if allocator.Reallocate fails and keeps the old block
  // alive, that block is now unregistered — verify against upstream intent.
  RegisterAllocation(stack, p, new_size);
  return p;
}
128*68d75effSDimitry Andric 
129*68d75effSDimitry Andric void GetAllocatorCacheRange(uptr *begin, uptr *end) {
130*68d75effSDimitry Andric   *begin = (uptr)GetAllocatorCache();
131*68d75effSDimitry Andric   *end = *begin + sizeof(AllocatorCache);
132*68d75effSDimitry Andric }
133*68d75effSDimitry Andric 
134*68d75effSDimitry Andric uptr GetMallocUsableSize(const void *p) {
135*68d75effSDimitry Andric   ChunkMetadata *m = Metadata(p);
136*68d75effSDimitry Andric   if (!m) return 0;
137*68d75effSDimitry Andric   return m->requested_size;
138*68d75effSDimitry Andric }
139*68d75effSDimitry Andric 
140*68d75effSDimitry Andric int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
141*68d75effSDimitry Andric                         const StackTrace &stack) {
142*68d75effSDimitry Andric   if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
143*68d75effSDimitry Andric     if (AllocatorMayReturnNull())
144*68d75effSDimitry Andric       return errno_EINVAL;
145*68d75effSDimitry Andric     ReportInvalidPosixMemalignAlignment(alignment, &stack);
146*68d75effSDimitry Andric   }
147*68d75effSDimitry Andric   void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
148*68d75effSDimitry Andric   if (UNLIKELY(!ptr))
149*68d75effSDimitry Andric     // OOM error is already taken care of by Allocate.
150*68d75effSDimitry Andric     return errno_ENOMEM;
151*68d75effSDimitry Andric   CHECK(IsAligned((uptr)ptr, alignment));
152*68d75effSDimitry Andric   *memptr = ptr;
153*68d75effSDimitry Andric   return 0;
154*68d75effSDimitry Andric }
155*68d75effSDimitry Andric 
156*68d75effSDimitry Andric void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
157*68d75effSDimitry Andric   if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
158*68d75effSDimitry Andric     errno = errno_EINVAL;
159*68d75effSDimitry Andric     if (AllocatorMayReturnNull())
160*68d75effSDimitry Andric       return nullptr;
161*68d75effSDimitry Andric     ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
162*68d75effSDimitry Andric   }
163*68d75effSDimitry Andric   return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
164*68d75effSDimitry Andric }
165*68d75effSDimitry Andric 
166*68d75effSDimitry Andric void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
167*68d75effSDimitry Andric   if (UNLIKELY(!IsPowerOfTwo(alignment))) {
168*68d75effSDimitry Andric     errno = errno_EINVAL;
169*68d75effSDimitry Andric     if (AllocatorMayReturnNull())
170*68d75effSDimitry Andric       return nullptr;
171*68d75effSDimitry Andric     ReportInvalidAllocationAlignment(alignment, &stack);
172*68d75effSDimitry Andric   }
173*68d75effSDimitry Andric   return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
174*68d75effSDimitry Andric }
175*68d75effSDimitry Andric 
// malloc() entry point: default-aligned allocation, errno set on failure.
void *lsan_malloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
}
179*68d75effSDimitry Andric 
// free() entry point.
void lsan_free(void *p) {
  Deallocate(p);
}
183*68d75effSDimitry Andric 
// realloc() entry point: default alignment, errno set on failure.
void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}
187*68d75effSDimitry Andric 
188*68d75effSDimitry Andric void *lsan_reallocarray(void *ptr, uptr nmemb, uptr size,
189*68d75effSDimitry Andric                         const StackTrace &stack) {
190*68d75effSDimitry Andric   if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
191*68d75effSDimitry Andric     errno = errno_ENOMEM;
192*68d75effSDimitry Andric     if (AllocatorMayReturnNull())
193*68d75effSDimitry Andric       return nullptr;
194*68d75effSDimitry Andric     ReportReallocArrayOverflow(nmemb, size, &stack);
195*68d75effSDimitry Andric   }
196*68d75effSDimitry Andric   return lsan_realloc(ptr, nmemb * size, stack);
197*68d75effSDimitry Andric }
198*68d75effSDimitry Andric 
// calloc() entry point: errno set on failure.
void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Calloc(nmemb, size, stack));
}
202*68d75effSDimitry Andric 
// valloc() entry point: page-aligned allocation, errno set on failure.
void *lsan_valloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(
      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}
207*68d75effSDimitry Andric 
208*68d75effSDimitry Andric void *lsan_pvalloc(uptr size, const StackTrace &stack) {
209*68d75effSDimitry Andric   uptr PageSize = GetPageSizeCached();
210*68d75effSDimitry Andric   if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
211*68d75effSDimitry Andric     errno = errno_ENOMEM;
212*68d75effSDimitry Andric     if (AllocatorMayReturnNull())
213*68d75effSDimitry Andric       return nullptr;
214*68d75effSDimitry Andric     ReportPvallocOverflow(size, &stack);
215*68d75effSDimitry Andric   }
216*68d75effSDimitry Andric   // pvalloc(0) should allocate one page.
217*68d75effSDimitry Andric   size = size ? RoundUpTo(size, PageSize) : PageSize;
218*68d75effSDimitry Andric   return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
219*68d75effSDimitry Andric }
220*68d75effSDimitry Andric 
// malloc-zone size query (Darwin interface): usable size of `p`, 0 if unowned.
uptr lsan_mz_size(const void *p) {
  return GetMallocUsableSize(p);
}
224*68d75effSDimitry Andric 
225*68d75effSDimitry Andric ///// Interface to the common LSan module. /////
226*68d75effSDimitry Andric 
// Acquires the allocator's internal locks (used while the world is stopped).
void LockAllocator() {
  allocator.ForceLock();
}
230*68d75effSDimitry Andric 
// Releases the locks taken by LockAllocator().
void UnlockAllocator() {
  allocator.ForceUnlock();
}
234*68d75effSDimitry Andric 
235*68d75effSDimitry Andric void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
236*68d75effSDimitry Andric   *begin = (uptr)&allocator;
237*68d75effSDimitry Andric   *end = *begin + sizeof(allocator);
238*68d75effSDimitry Andric }
239*68d75effSDimitry Andric 
240*68d75effSDimitry Andric uptr PointsIntoChunk(void* p) {
241*68d75effSDimitry Andric   uptr addr = reinterpret_cast<uptr>(p);
242*68d75effSDimitry Andric   uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
243*68d75effSDimitry Andric   if (!chunk) return 0;
244*68d75effSDimitry Andric   // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
245*68d75effSDimitry Andric   // valid, but we don't want that.
246*68d75effSDimitry Andric   if (addr < chunk) return 0;
247*68d75effSDimitry Andric   ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
248*68d75effSDimitry Andric   CHECK(m);
249*68d75effSDimitry Andric   if (!m->allocated)
250*68d75effSDimitry Andric     return 0;
251*68d75effSDimitry Andric   if (addr < chunk + m->requested_size)
252*68d75effSDimitry Andric     return chunk;
253*68d75effSDimitry Andric   if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
254*68d75effSDimitry Andric     return chunk;
255*68d75effSDimitry Andric   return 0;
256*68d75effSDimitry Andric }
257*68d75effSDimitry Andric 
// LSan keeps metadata out of line, so the chunk begin is the user pointer.
uptr GetUserBegin(uptr chunk) {
  return chunk;
}
261*68d75effSDimitry Andric 
// Adapter for the common LSan module: wraps the ChunkMetadata of the chunk
// starting at `chunk`.
LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}
266*68d75effSDimitry Andric 
// Whether the wrapped chunk is currently allocated.
bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}
270*68d75effSDimitry Andric 
// Leak-classification tag of the wrapped chunk.
ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}
274*68d75effSDimitry Andric 
// Updates the leak-classification tag of the wrapped chunk.
void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}
278*68d75effSDimitry Andric 
// User-requested size of the wrapped chunk.
uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}
282*68d75effSDimitry Andric 
// StackDepot id of the allocation stack recorded for the wrapped chunk.
u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}
286*68d75effSDimitry Andric 
// Forwards chunk enumeration to the allocator.
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}
290*68d75effSDimitry Andric 
291*68d75effSDimitry Andric IgnoreObjectResult IgnoreObjectLocked(const void *p) {
292*68d75effSDimitry Andric   void *chunk = allocator.GetBlockBegin(p);
293*68d75effSDimitry Andric   if (!chunk || p < chunk) return kIgnoreObjectInvalid;
294*68d75effSDimitry Andric   ChunkMetadata *m = Metadata(chunk);
295*68d75effSDimitry Andric   CHECK(m);
296*68d75effSDimitry Andric   if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
297*68d75effSDimitry Andric     if (m->tag == kIgnored)
298*68d75effSDimitry Andric       return kIgnoreObjectAlreadyIgnored;
299*68d75effSDimitry Andric     m->tag = kIgnored;
300*68d75effSDimitry Andric     return kIgnoreObjectSuccess;
301*68d75effSDimitry Andric   } else {
302*68d75effSDimitry Andric     return kIgnoreObjectInvalid;
303*68d75effSDimitry Andric   }
304*68d75effSDimitry Andric }
305*68d75effSDimitry Andric } // namespace __lsan
306*68d75effSDimitry Andric 
307*68d75effSDimitry Andric using namespace __lsan;
308*68d75effSDimitry Andric 
309*68d75effSDimitry Andric extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
// Total bytes currently allocated, from the allocator's statistics.
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}
316*68d75effSDimitry Andric 
SANITIZER_INTERFACE_ATTRIBUTE
// Total bytes mapped by the allocator, from its statistics.
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}
323*68d75effSDimitry Andric 
SANITIZER_INTERFACE_ATTRIBUTE
// Not tracked by LSan; always 0.
uptr __sanitizer_get_free_bytes() { return 0; }
326*68d75effSDimitry Andric 
SANITIZER_INTERFACE_ATTRIBUTE
// Not tracked by LSan; always 0.
uptr __sanitizer_get_unmapped_bytes() { return 0; }
329*68d75effSDimitry Andric 
SANITIZER_INTERFACE_ATTRIBUTE
// LSan adds no per-chunk overhead estimate; the request size is the estimate.
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
332*68d75effSDimitry Andric 
SANITIZER_INTERFACE_ATTRIBUTE
// Non-zero iff `p` was allocated by this allocator (has metadata).
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }
335*68d75effSDimitry Andric 
SANITIZER_INTERFACE_ATTRIBUTE
// Usable size of `p`; 0 when `p` is not allocator-owned.
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}
340*68d75effSDimitry Andric 
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
// On platforms without weak-symbol support, these stand in for the
// user-overridable hooks invoked from Allocate()/Deallocate().
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
#endif
353*68d75effSDimitry Andric } // extern "C"
354