//===-- hwasan_allocator.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_checks.h"
#include "hwasan_mapping.h"
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"
#include "lsan/lsan_common.h"

namespace __hwasan {

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;

static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
static constexpr tag_t kFallbackFreeTag = 0xBC;

enum {
  // Either just allocated by the underlying allocator, but the chunk metadata
  // is not yet ready, or almost returned to the underlying allocator and the
  // metadata is already meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 1,
};

// Random magic bytes written into the unused tail of each allocation and
// checked on free to detect tail overwrites. Initialized in
// HwasanAllocatorInit, and never changed.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];

bool HwasanChunkView::IsAllocated() const {
  return metadata_ && metadata_->IsAllocated();
}

uptr HwasanChunkView::Beg() const {
  return block_;
}
uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
  return metadata_->GetRequestedSize();
}
u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->GetAllocStackId();
}

uptr HwasanChunkView::ActualSize() const {
  return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::AddrIsInside(uptr addr) const {
  return (addr >= Beg()) && (addr < Beg() + UsedSize());
}

inline void Metadata::SetAllocated(u32 stack, u64 size) {
  Thread *t = GetCurrentThread();
  u64 context = t ? t->unique_id() : kMainTid;
  context <<= 32;
  context += stack;
  requested_size_low = size & ((1ul << 32) - 1);
  requested_size_high = size >> 32;
  atomic_store(&alloc_context_id, context, memory_order_relaxed);
  atomic_store(&chunk_state, CHUNK_ALLOCATED, memory_order_release);
}

inline void Metadata::SetUnallocated() {
  atomic_store(&chunk_state, CHUNK_INVALID, memory_order_release);
  requested_size_low = 0;
  requested_size_high = 0;
  atomic_store(&alloc_context_id, 0, memory_order_relaxed);
}

inline bool Metadata::IsAllocated() const {
  return atomic_load(&chunk_state, memory_order_relaxed) == CHUNK_ALLOCATED &&
         GetRequestedSize();
}

inline u64 Metadata::GetRequestedSize() const {
  return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
}
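// For example, SetAllocated(stack, 0x123456789) stores requested_size_low ==
// 0x23456789 and requested_size_high == 0x1; GetRequestedSize() then
// reassembles (u64(0x1) << 32) + 0x23456789 == 0x123456789.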

inline u32 Metadata::GetAllocStackId() const {
  return atomic_load(&alloc_context_id, memory_order_relaxed);
}

void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}

inline void Metadata::SetLsanTag(__lsan::ChunkTag tag) {
  lsan_tag = tag;
}

inline __lsan::ChunkTag Metadata::GetLsanTag() const {
  return static_cast<__lsan::ChunkTag>(lsan_tag);
}

uptr GetAliasRegionStart() {
#if defined(HWASAN_ALIASING_MODE)
  constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
  uptr AliasRegionStart =
      __hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;

  CHECK_EQ(AliasRegionStart >> kTaggableRegionCheckShift,
           __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  CHECK_EQ(
      (AliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
      __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  return AliasRegionStart;
#else
  return 0;
#endif
}
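// Note: in aliasing mode the CHECKs above verify that the alias region start,
// and the address kAliasRegionOffset - 1 bytes past it, lie in the same
// (1 << kTaggableRegionCheckShift)-sized window as the dynamic shadow base,
// so pointers aliased into this region remain taggable.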

void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms,
                 GetAliasRegionStart());
  for (uptr i = 0; i < sizeof(tail_magic); i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
}

void HwasanAllocatorLock() { allocator.ForceLock(); }

void HwasanAllocatorUnlock() { allocator.ForceUnlock(); }

void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
  allocator.SwallowCache(cache);
}

static uptr TaggedSize(uptr size) {
  if (!size) size = 1;
  uptr new_size = RoundUpTo(size, kShadowAlignment);
  CHECK_GE(new_size, size);
  return new_size;
}
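// For example, assuming kShadowAlignment == 16: TaggedSize(0) == 16 (a
// zero-byte request still occupies one granule), TaggedSize(16) == 16, and
// TaggedSize(17) == 32.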

static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  if (orig_size > kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  if (zeroise) {
    internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
  if (size != orig_size) {
    u8 *tail = reinterpret_cast<u8 *>(allocated) + orig_size;
    uptr tail_length = size - orig_size;
    internal_memcpy(tail, tail_magic, tail_length - 1);
    // Short granule is excluded from magic tail, so we explicitly untag.
    tail[tail_length - 1] = 0;
  }
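  // Layout example (assuming kShadowAlignment == 16): for orig_size == 20 the
  // rounded size is 32, so bytes 20 through 30 of the allocation receive the
  // first 11 bytes of tail_magic and byte 31 stays 0; that last byte is
  // reserved for the short granule tag written below when tagging is enabled.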

  void *user_ptr = allocated;
  // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
  // false. When tag_in_malloc = false and tag_in_free = true, malloc needs to
  // retag to 0.
  if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
      (flags()->tag_in_malloc || flags()->tag_in_free) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
      tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
      uptr tag_size = orig_size ? orig_size : 1;
      uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
      user_ptr =
          (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
      if (full_granule_size != tag_size) {
        u8 *short_granule =
            reinterpret_cast<u8 *>(allocated) + full_granule_size;
        TagMemoryAligned((uptr)short_granule, kShadowAlignment,
                         tag_size % kShadowAlignment);
        short_granule[kShadowAlignment - 1] = tag;
      }
    } else {
      user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
    }
  }
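  // Tagging example for the same 20-byte allocation with tag 0xAB (assuming
  // kShadowAlignment == 16): the shadow byte of the first granule becomes
  // 0xAB, the shadow byte of the second granule becomes 4 (20 % 16, marking a
  // short granule), and byte 31 of the allocation holds 0xAB so the real tag
  // is still recoverable for the partial granule.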

  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
#if CAN_SANITIZE_LEAKS
  meta->SetLsanTag(__lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                  : __lsan::kDirectlyLeaked);
#endif
  meta->SetAllocated(StackDepotPut(*stack), orig_size);
  RunMallocHooks(user_ptr, size);
  return user_ptr;
}

static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
  CHECK(tagged_ptr);
  uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
  if (!InTaggableRegion(tagged_uptr))
    return true;
  tag_t mem_tag = *reinterpret_cast<tag_t *>(
      MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
  return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}
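// If the shadow value is below kShadowAlignment it denotes a short granule
// size rather than a full tag; in that case PossiblyShortTagMatches (see
// hwasan_checks.h) compares the pointer tag against the tag stored in the
// last byte of the granule, after checking that the access fits inside the
// valid short-granule prefix.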

static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr,
                             void *tagged_ptr) {
  // This function can return true if halt_on_error is false.
  if (!MemIsApp(reinterpret_cast<uptr>(untagged_ptr)) ||
      !PointerAndMemoryTagsMatch(tagged_ptr)) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return true;
  }
  return false;
}

static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  RunFreeHooks(tagged_ptr);

  bool in_taggable_region =
      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr));
  void *untagged_ptr = in_taggable_region ? UntagPtr(tagged_ptr) : tagged_ptr;

  if (CheckInvalidFree(stack, untagged_ptr, tagged_ptr))
    return;

  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  tag_t pointer_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  if (!meta) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return;
  }
  uptr orig_size = meta->GetRequestedSize();
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->GetAllocStackId();

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && orig_size &&
      tagged_size != orig_size) {
    uptr tail_size = tagged_size - orig_size - 1;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    tag_t short_granule_memtag = *(reinterpret_cast<tag_t *>(
        reinterpret_cast<uptr>(tail_beg) + tail_size));
    if (tail_size &&
        (internal_memcmp(tail_beg, tail_magic, tail_size) ||
         (in_taggable_region && pointer_tag != short_granule_memtag)))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_magic);
  }

  // TODO(kstoimenov): consider meta->SetUnallocated(free_context_id).
  meta->SetUnallocated();
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (in_taggable_region && flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    // Always store full 8-bit tags on free to maximize UAF detection.
    tag_t tag;
    if (t) {
      // Make sure we are not using a short granule tag as a poison tag. This
      // would make us attempt to read the memory on a UaF.
      // The tag can be zero if tagging is disabled on this thread.
      do {
        tag = t->GenerateRandomTag(/*num_bits=*/8);
      } while (
          UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
    } else {
      static_assert(kFallbackFreeTag >= kShadowAlignment,
                    "fallback tag must not be a short granule tag.");
      tag = kFallbackFreeTag;
    }
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     tag);
  }
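  // Release the chunk back to the allocator and, if the thread keeps a heap
  // allocation ring buffer, record the deallocation so later use-after-free
  // reports can show both the allocation and the deallocation stacks.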
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
                free_context_id, static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}

static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  void *untagged_ptr_old =
      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr_old))
          ? UntagPtr(tagged_ptr_old)
          : tagged_ptr_old;
  if (CheckInvalidFree(stack, untagged_ptr_old, tagged_ptr_old))
    return nullptr;
  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    internal_memcpy(
        UntagPtr(tagged_ptr_new), untagged_ptr_old,
        Min(new_size, static_cast<uptr>(meta->GetRequestedSize())));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}

static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}

HwasanChunkView FindHeapChunkByAddress(uptr address) {
  if (!allocator.PointerIsMine(reinterpret_cast<void *>(address)))
    return HwasanChunkView();
  void *block = allocator.GetBlockBegin(reinterpret_cast<void *>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}

static uptr AllocationSize(const void *tagged_ptr) {
  const void *untagged_ptr = UntagPtr(tagged_ptr);
  if (!untagged_ptr) return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
  if (beg != untagged_ptr) return 0;
  return b->GetRequestedSize();
}
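// Only a pointer to the user-visible start of a live block reports a non-zero
// size; interior pointers yield 0, which also makes
// __sanitizer_get_ownership() treat them as not owned.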

void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}

void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}

void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}

void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return hwasan_realloc(ptr, nmemb * size, stack);
}

void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}

void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}

void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                          StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void hwasan_free(void *ptr, StackTrace *stack) {
  return HwasanDeallocate(stack, ptr);
}

}  // namespace __hwasan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {

void LockAllocator() {
  __hwasan::HwasanAllocatorLock();
}

void UnlockAllocator() {
  __hwasan::HwasanAllocatorUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__hwasan::allocator;
  *end = *begin + sizeof(__hwasan::allocator);
}

uptr PointsIntoChunk(void *p) {
  p = __hwasan::InTaggableRegion(reinterpret_cast<uptr>(p)) ? UntagPtr(p) : p;
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk =
      reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBeginFastLocked(p));
  if (!chunk)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return 0;
  if (addr < chunk + metadata->GetRequestedSize())
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, metadata->GetRequestedSize(), addr))
    return chunk;
  return 0;
}
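// The IsSpecialCaseOfOperatorNew0 check keeps zero-sized operator new
// allocations reachable: for a chunk with requested size 0, a pointer to the
// chunk start would fail the size check above even though it is a valid
// reference to the allocation.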

uptr GetUserBegin(uptr chunk) {
  if (__hwasan::InTaggableRegion(chunk))
    CHECK_EQ(UntagAddr(chunk), chunk);
  void *block = __hwasan::allocator.GetBlockBeginFastLocked(
      reinterpret_cast<void *>(chunk));
  if (!block)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(block));
  if (!metadata || !metadata->IsAllocated())
    return 0;

  return reinterpret_cast<uptr>(block);
}

LsanMetadata::LsanMetadata(uptr chunk) {
  if (__hwasan::InTaggableRegion(chunk))
    CHECK_EQ(UntagAddr(chunk), chunk);
  metadata_ =
      chunk ? __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk))
            : nullptr;
}

bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->IsAllocated();
}

ChunkTag LsanMetadata::tag() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetLsanTag();
}

void LsanMetadata::set_tag(ChunkTag value) {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  m->SetLsanTag(value);
}

uptr LsanMetadata::requested_size() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetRequestedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetAllocStackId();
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __hwasan::allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  p = __hwasan::InTaggableRegion(reinterpret_cast<uptr>(p)) ? UntagPtr(p) : p;
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk =
      reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBeginFastLocked(p));
  if (!chunk)
    return kIgnoreObjectInvalid;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return kIgnoreObjectInvalid;
  if (addr >= chunk + metadata->GetRequestedSize())
    return kIgnoreObjectInvalid;
  if (metadata->GetLsanTag() == kIgnored)
    return kIgnoreObjectAlreadyIgnored;

  metadata->SetLsanTag(kIgnored);
  return kIgnoreObjectSuccess;
}

}  // namespace __lsan

using namespace __hwasan;

void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}

void __hwasan_disable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }