//=-- lsan_allocator.cpp --------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

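// memset is forward-declared here instead of pulling in a libc header;
// Allocate() below uses it to zero primary-allocator memory.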
extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
#if defined(__i386__) || defined(__arm__)
static const uptr kMaxAllowedMallocSize = 1ULL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4ULL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8ULL << 30;
#endif

static Allocator allocator;

static uptr max_malloc_size;

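// Set up the allocator and compute the effective allocation size limit from
// the max_allocation_size_mb flag, capped at kMaxAllowedMallocSize.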
void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(GetAllocatorCache());
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

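// Record tag, stack trace and requested size for a new chunk, then mark it as
// allocated by atomically setting the leading metadata byte.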
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

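// Atomically clear the leading metadata byte so the chunk is no longer
// considered allocated by the leak checker.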
static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

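// Oversized request: return null if the allocator is allowed to, otherwise
// report a fatal error against the configured limit.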
static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
  if (AllocatorMayReturnNull()) {
    Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
    return nullptr;
  }
  ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
}

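// Central allocation path: enforces the size and RSS limits, zeroes
// primary-allocator memory when requested, registers the chunk and runs the
// malloc hooks.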
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > max_malloc_size)
    return ReportAllocationSizeTooBig(size, stack);
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(&stack);
  }
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, &stack);
  }
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  RunMallocHooks(p, size);
  return p;
}

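// calloc() backend: checks nmemb * size for overflow before allocating
// zero-initialized memory.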
static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, &stack);
  }
  size *= nmemb;
  return Allocate(stack, size, 1, true);
}

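// Central deallocation path: runs the free hooks, unregisters the chunk and
// returns it to the allocator.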
void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RunFreeHooks(p);
  RegisterDeallocation(p);
  allocator.Deallocate(GetAllocatorCache(), p);
}

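// realloc() backend: the chunk is unregistered before the resize; if the
// resize fails and new_size is non-zero, the original chunk is registered
// again so it stays visible to the leak checker.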
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  if (new_size > max_malloc_size) {
    ReportAllocationSizeTooBig(new_size, stack);
    return nullptr;
  }
  RegisterDeallocation(p);
  void *new_p =
      allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
  if (new_p)
    RegisterAllocation(stack, new_p, new_size);
  else if (new_size != 0)
    RegisterAllocation(stack, p, new_size);
  return new_p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)GetAllocatorCache();
  *end = *begin + sizeof(AllocatorCache);
}

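// Note that LSan reports the originally requested size, not the rounded-up
// usable size of the underlying block.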
uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

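// lsan_* entry points: thin wrappers that add the argument checking and errno
// semantics of the corresponding libc allocation functions.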
int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, &stack);
  }
  void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_malloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
}

void lsan_free(void *p) {
  Deallocate(p);
}

void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}

void *lsan_reallocarray(void *ptr, uptr nmemb, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, &stack);
  }
  return lsan_realloc(ptr, nmemb * size, stack);
}

void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Calloc(nmemb, size, stack));
}

void *lsan_valloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(
      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}

void *lsan_pvalloc(uptr size, const StackTrace &stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, &stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
}

uptr lsan_mz_size(const void *p) {
  return GetMallocUsableSize(p);
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

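// Returns the start address of the allocated chunk that p points into, or 0
// if p does not point into a live chunk.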
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

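// Tag the chunk containing p as kIgnored so it is excluded from leak reports.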
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}

void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs) {
  // This function can be used to treat memory reachable from `tctx` as live.
  // This is useful for threads that have been created but not yet started.

  // This is currently a no-op because the LSan `pthread_create()` interceptor
  // blocks until the child thread starts, which keeps the thread's `arg`
  // pointer live.
}

} // namespace __lsan

using namespace __lsan;

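// __sanitizer_* allocator introspection entry points backed by the LSan
// allocator statistics.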
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
#endif
} // extern "C"