//===-- msan_allocator.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "msan.h"
#include "msan_allocator.h"
#include "msan_origin.h"
#include "msan_thread.h"
#include "msan_poisoning.h"

namespace __msan {

// Per-chunk metadata kept by the allocator alongside each allocation.
struct Metadata {
  uptr requested_size;
};

313cab2bb3Spatrick struct MsanMapUnmapCallback {
OnMap__msan::MsanMapUnmapCallback323cab2bb3Spatrick   void OnMap(uptr p, uptr size) const {}
OnUnmap__msan::MsanMapUnmapCallback333cab2bb3Spatrick   void OnUnmap(uptr p, uptr size) const {
343cab2bb3Spatrick     __msan_unpoison((void *)p, size);
353cab2bb3Spatrick 
363cab2bb3Spatrick     // We are about to unmap a chunk of user memory.
373cab2bb3Spatrick     // Mark the corresponding shadow memory as not needed.
383cab2bb3Spatrick     uptr shadow_p = MEM_TO_SHADOW(p);
393cab2bb3Spatrick     ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
403cab2bb3Spatrick     if (__msan_get_track_origins()) {
413cab2bb3Spatrick       uptr origin_p = MEM_TO_ORIGIN(p);
423cab2bb3Spatrick       ReleaseMemoryPagesToOS(origin_p, origin_p + size);
433cab2bb3Spatrick     }
443cab2bb3Spatrick   }
453cab2bb3Spatrick };
463cab2bb3Spatrick 
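// Per-architecture allocator parameters. Each target selects a primary
// allocator (SizeClassAllocator32 or SizeClassAllocator64), the address-space
// region it lives in, and kMaxAllowedMallocSize, the hard cap on the size of a
// single allocation.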
#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 2UL << 30;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__)
#if SANITIZER_NETBSD || SANITIZER_LINUX
static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
static const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;

#elif defined(__powerpc64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x300000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__s390x__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x440000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {
  static const uptr kSpaceBeg = 0xE00000000000ULL;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;

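// One global allocator instance, plus a fallback cache protected by a spin
// mutex for code paths that run on a thread without an MsanThread object.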
static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;

static uptr max_malloc_size;

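// Called once at startup: configures whether allocation failures may return
// null and derives the effective allocation size cap from the flags.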
void MsanAllocatorInit() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
}

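// Common allocation path for all malloc-family entry points: enforces the
// size and RSS limits, allocates from the thread-local cache (or the global
// fallback), records the requested size in the chunk metadata, then either
// zero-initializes the memory (calloc) or poisons it, tagging it with an
// allocation origin when origin tracking is enabled.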
static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  if (size > max_malloc_size) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n", size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(size, max_malloc_size, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(stack);
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    __msan_clear_and_unpoison(allocated, size);
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(allocated, size, o.raw_id());
    }
  }
  UnpoisonParam(2);
  RunMallocHooks(allocated, size);
  return allocated;
}

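// Frees a chunk: runs the free hooks, clears the recorded size, optionally
// re-poisons the memory (tagged with a deallocation origin) so later reads of
// freed memory are reported as uninitialized, and returns the chunk to the
// thread-local or fallback cache.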
void MsanDeallocate(StackTrace *stack, void *p) {
  CHECK(p);
  UnpoisonParam(1);
  RunFreeHooks(p);

  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  if (flags()->poison_in_free) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, size, o.raw_id());
    }
  }
  MsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}

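// realloc: if the new size still fits within the chunk the allocator actually
// handed out, just update the metadata and poison any newly exposed tail;
// otherwise allocate a new chunk, copy Min(new_size, old_size) bytes together
// with their shadow and origins, and free the old chunk.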
static void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
                            uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (new_p) {
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}

static void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
}

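// Returns the user-requested size of a chunk owned by this allocator, or 0
// for null, interior, or foreign pointers.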
static uptr AllocationSize(const void *p) {
  if (!p) return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p) return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}

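// Public malloc-family wrappers. They map the libc-style entry points onto
// MsanAllocate/MsanCalloc/MsanReallocate, performing the overflow and
// alignment checks each interface requires and setting errno on failure.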
void *msan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}

void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
}

void *msan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    MsanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}

void *msan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return msan_realloc(ptr, nmemb * size, stack);
}

void *msan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}

void *msan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}

void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = MsanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by MsanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

} // namespace __msan

using namespace __msan;

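// Public __sanitizer_* allocator introspection entry points.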
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }