//===-- hwasan_allocator.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_checks.h"
#include "hwasan_mapping.h"
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"
#include "lsan/lsan_common.h"

namespace __hwasan {

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;

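// Fixed tags used on allocation and deallocation paths that run before the
// current Thread (and its random tag generator) is available.
// kFallbackAllocTag is masked with kTagMask so it stays representable when
// fewer tag bits are available (e.g. in aliasing mode).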
static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
static constexpr tag_t kFallbackFreeTag = 0xBC;

enum {
  // Either just allocated by the underlying allocator, but the Metadata is
  // not yet ready, or almost returned to the underlying allocator and the
  // Metadata is already meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 1,
};

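// Random bytes copied into the otherwise-unused tail of each allocation;
// checked on free (when free_checks_tail_magic is set) to detect writes past
// the requested size within the last granule.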
// Initialized in HwasanAllocatorInit, and never changed.
alignas(16) static u8 tail_magic[kShadowAlignment - 1];
static uptr max_malloc_size;

bool HwasanChunkView::IsAllocated() const {
  return metadata_ && metadata_->IsAllocated();
}

uptr HwasanChunkView::Beg() const {
  return block_;
}
uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
  return metadata_->GetRequestedSize();
}
u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->GetAllocStackId();
}

u32 HwasanChunkView::GetAllocThreadId() const {
  return metadata_->GetAllocThreadId();
}

uptr HwasanChunkView::ActualSize() const {
  return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::AddrIsInside(uptr addr) const {
  return (addr >= Beg()) && (addr < Beg() + UsedSize());
}

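// The 64-bit alloc_context_id packs the allocating thread's unique id in the
// high 32 bits and the stack depot id in the low 32 bits. chunk_state is
// stored last with release ordering so the rest of the metadata is published
// before the chunk is considered allocated.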
inline void Metadata::SetAllocated(u32 stack, u64 size) {
  Thread *t = GetCurrentThread();
  u64 context = t ? t->unique_id() : kMainTid;
  context <<= 32;
  context += stack;
  requested_size_low = size & ((1ul << 32) - 1);
  requested_size_high = size >> 32;
  atomic_store(&alloc_context_id, context, memory_order_relaxed);
  atomic_store(&chunk_state, CHUNK_ALLOCATED, memory_order_release);
}

inline void Metadata::SetUnallocated() {
  atomic_store(&chunk_state, CHUNK_INVALID, memory_order_release);
  requested_size_low = 0;
  requested_size_high = 0;
  atomic_store(&alloc_context_id, 0, memory_order_relaxed);
}

inline bool Metadata::IsAllocated() const {
  return atomic_load(&chunk_state, memory_order_relaxed) == CHUNK_ALLOCATED;
}

inline u64 Metadata::GetRequestedSize() const {
  return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
}

inline u32 Metadata::GetAllocStackId() const {
  return atomic_load(&alloc_context_id, memory_order_relaxed);
}

inline u32 Metadata::GetAllocThreadId() const {
  u64 context = atomic_load(&alloc_context_id, memory_order_relaxed);
  u32 tid = context >> 32;
  return tid;
}

void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}

inline void Metadata::SetLsanTag(__lsan::ChunkTag tag) {
  lsan_tag = tag;
}

inline __lsan::ChunkTag Metadata::GetLsanTag() const {
  return static_cast<__lsan::ChunkTag>(lsan_tag);
}

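// In aliasing mode the primary allocator lives at a fixed offset above the
// shadow so that the whole alias region shares the same upper address bits
// (above kTaggableRegionCheckShift) as the shadow base; the CHECKs below
// verify both ends of the region. In other modes there is no alias region
// and 0 is returned.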
uptr GetAliasRegionStart() {
#if defined(HWASAN_ALIASING_MODE)
  constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
  uptr AliasRegionStart =
      __hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;

  CHECK_EQ(AliasRegionStart >> kTaggableRegionCheckShift,
           __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  CHECK_EQ(
      (AliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
      __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  return AliasRegionStart;
#else
  return 0;
#endif
}

void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms,
      GetAliasRegionStart());
  for (uptr i = 0; i < sizeof(tail_magic); i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
  if (common_flags()->max_allocation_size_mb) {
    max_malloc_size = common_flags()->max_allocation_size_mb << 20;
    max_malloc_size = Min(max_malloc_size, kMaxAllowedMallocSize);
  } else {
    max_malloc_size = kMaxAllowedMallocSize;
  }
}

void HwasanAllocatorLock() { allocator.ForceLock(); }

void HwasanAllocatorUnlock() { allocator.ForceUnlock(); }

void AllocatorThreadStart(AllocatorCache *cache) { allocator.InitCache(cache); }

void AllocatorThreadFinish(AllocatorCache *cache) {
  allocator.SwallowCache(cache);
  allocator.DestroyCache(cache);
}

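// Size that is actually allocated and tagged: the requested size rounded up
// to a whole granule (kShadowAlignment). Zero-byte requests still occupy one
// granule.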
static uptr TaggedSize(uptr size) {
  if (!size) size = 1;
  uptr new_size = RoundUpTo(size, kShadowAlignment);
  CHECK_GE(new_size, size);
  return new_size;
}

static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  // Keep this consistent with LSAN and ASAN behavior.
  if (UNLIKELY(orig_size == 0))
    orig_size = 1;
  if (UNLIKELY(orig_size > max_malloc_size)) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, max_malloc_size, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  if (zeroise) {
    // The secondary allocator mmaps memory, which is already zero-initialized,
    // so we don't need to clear it explicitly.
    if (allocator.FromPrimary(allocated))
      internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
  if (size != orig_size) {
    u8 *tail = reinterpret_cast<u8 *>(allocated) + orig_size;
    uptr tail_length = size - orig_size;
    internal_memcpy(tail, tail_magic, tail_length - 1);
    // The last byte of the granule is reserved for the short granule tag, so
    // it is excluded from the magic tail; explicitly untag (zero) it.
    tail[tail_length - 1] = 0;
  }

  void *user_ptr = allocated;
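  // Short granule encoding: when the requested size is not a multiple of
  // kShadowAlignment, the shadow byte of the last granule stores the number
  // of accessible bytes (1..15) instead of the tag, and the real tag is
  // stashed in the last byte of the granule itself (see hwasan_checks.h).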
  if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
      flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
    tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
    uptr tag_size = orig_size ? orig_size : 1;
    uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
    user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
    if (full_granule_size != tag_size) {
      u8 *short_granule = reinterpret_cast<u8 *>(allocated) + full_granule_size;
      TagMemoryAligned((uptr)short_granule, kShadowAlignment,
                       tag_size % kShadowAlignment);
      short_granule[kShadowAlignment - 1] = tag;
    }
  } else {
    // Tagging cannot be skipped entirely: if it is disabled, we still need to
    // tag the memory with zeros.
    user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
  }

  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
#if CAN_SANITIZE_LEAKS
  meta->SetLsanTag(__lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                  : __lsan::kDirectlyLeaked);
#endif
  meta->SetAllocated(StackDepotPut(*stack), orig_size);
  RunMallocHooks(user_ptr, orig_size);
  return user_ptr;
}

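// Returns true when the pointer's tag matches the memory tag of its first
// granule, accepting the short granule encoding (where the granule's last
// byte holds the tag). Pointers outside the taggable region always match.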
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
  CHECK(tagged_ptr);
  uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
  if (!InTaggableRegion(tagged_uptr))
    return true;
  tag_t mem_tag = *reinterpret_cast<tag_t *>(
      MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
  return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}

static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr,
                             void *tagged_ptr) {
  // This function can return true if halt_on_error is false.
  if (!MemIsApp(reinterpret_cast<uptr>(untagged_ptr)) ||
      !PointerAndMemoryTagsMatch(tagged_ptr)) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return true;
  }
  return false;
}

static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  void *untagged_ptr = UntagPtr(tagged_ptr);

  if (RunFreeHooks(tagged_ptr))
    return;

  if (CheckInvalidFree(stack, untagged_ptr, tagged_ptr))
    return;

  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  tag_t pointer_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  if (!meta) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return;
  }

  uptr orig_size = meta->GetRequestedSize();
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->GetAllocStackId();
  u32 alloc_thread_id = meta->GetAllocThreadId();

  bool in_taggable_region =
      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr));

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && orig_size &&
      tagged_size != orig_size) {
    uptr tail_size = tagged_size - orig_size - 1;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    tag_t short_granule_memtag = *(reinterpret_cast<tag_t *>(
        reinterpret_cast<uptr>(tail_beg) + tail_size));
    if (tail_size &&
        (internal_memcmp(tail_beg, tail_magic, tail_size) ||
         (in_taggable_region && pointer_tag != short_granule_memtag)))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_magic);
  }

  // TODO(kstoimenov): consider meta->SetUnallocated(free_context_id).
  meta->SetUnallocated();
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (in_taggable_region && flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
      allocator.FromPrimary(untagged_ptr) /* Secondary 0-tags and unmaps. */) {
    // Always store full 8-bit tags on free to maximize UAF detection.
    tag_t tag;
    if (t) {
      // Make sure we are not using a short granule tag as a poison tag. This
      // would make us attempt to read the memory on a UaF.
      // The tag can be zero if tagging is disabled on this thread.
      do {
        tag = t->GenerateRandomTag(/*num_bits=*/8);
      } while (
          UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
    } else {
      static_assert(kFallbackFreeTag >= kShadowAlignment,
                    "fallback tag must not be a short granule tag.");
      tag = kFallbackFreeTag;
    }
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     tag);
  }
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_thread_id,
                alloc_context_id, free_context_id,
                static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}

static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
  if (CheckInvalidFree(stack, untagged_ptr_old, tagged_ptr_old))
    return nullptr;
  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    void *untagged_ptr_new = UntagPtr(tagged_ptr_new);
    internal_memcpy(untagged_ptr_new, untagged_ptr_old,
                    Min(new_size, static_cast<uptr>(meta->GetRequestedSize())));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}

static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}

HwasanChunkView FindHeapChunkByAddress(uptr address) {
  if (!allocator.PointerIsMine(reinterpret_cast<void *>(address)))
    return HwasanChunkView();
  void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}

static const void *AllocationBegin(const void *p) {
  const void *untagged_ptr = UntagPtr(p);
  if (!untagged_ptr)
    return nullptr;

  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  if (!beg)
    return nullptr;

  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
  if (b->GetRequestedSize() == 0)
    return nullptr;

  tag_t tag = GetTagFromPointer((uptr)p);
  return (const void *)AddTagToPointer((uptr)beg, tag);
}

static uptr AllocationSize(const void *p) {
  const void *untagged_ptr = UntagPtr(p);
  if (!untagged_ptr) return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  if (!beg)
    return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
  return b->GetRequestedSize();
}

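// Fast path that skips GetBlockBegin: only valid when p is already the
// beginning of an allocation (the caller DCHECKs this).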
static uptr AllocationSizeFast(const void *p) {
  const void *untagged_ptr = UntagPtr(p);
  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  return meta->GetRequestedSize();
}

void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}

void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}

void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}

void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return hwasan_realloc(ptr, nmemb * size, stack);
}

void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}

void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}

void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                          StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void hwasan_free(void *ptr, StackTrace *stack) {
  return HwasanDeallocate(stack, ptr);
}

}  // namespace __hwasan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {

void LockAllocator() {
  __hwasan::HwasanAllocatorLock();
}

void UnlockAllocator() {
  __hwasan::HwasanAllocatorUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__hwasan::allocator;
  *end = *begin + sizeof(__hwasan::allocator);
}

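// LSan works with untagged addresses. Returns the (untagged) chunk begin if
// p points into a live chunk; the zero-size chunk returned by operator
// new(0) is special-cased since it fails the plain range check.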
uptr PointsIntoChunk(void *p) {
  p = UntagPtr(p);
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk =
      reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBeginFastLocked(p));
  if (!chunk)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return 0;
  if (addr < chunk + metadata->GetRequestedSize())
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, metadata->GetRequestedSize(), addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  CHECK_EQ(UntagAddr(chunk), chunk);
  void *block = __hwasan::allocator.GetBlockBeginFastLocked(
      reinterpret_cast<void *>(chunk));
  if (!block)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(block));
  if (!metadata || !metadata->IsAllocated())
    return 0;

  return reinterpret_cast<uptr>(block);
}

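// Reconstructs the tagged user pointer for an untagged chunk address by
// reading the memory tag back from shadow.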
uptr GetUserAddr(uptr chunk) {
  if (!InTaggableRegion(chunk))
    return chunk;
  tag_t mem_tag = *(tag_t *)__hwasan::MemToShadow(chunk);
  return AddTagToPointer(chunk, mem_tag);
}

LsanMetadata::LsanMetadata(uptr chunk) {
  CHECK_EQ(UntagAddr(chunk), chunk);
  metadata_ =
      chunk ? __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk))
            : nullptr;
}

bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->IsAllocated();
}

ChunkTag LsanMetadata::tag() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetLsanTag();
}

void LsanMetadata::set_tag(ChunkTag value) {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  m->SetLsanTag(value);
}

uptr LsanMetadata::requested_size() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetRequestedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetAllocStackId();
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __hwasan::allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObject(const void *p) {
  p = UntagPtr(p);
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBegin(p));
  if (!chunk)
    return kIgnoreObjectInvalid;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return kIgnoreObjectInvalid;
  if (addr >= chunk + metadata->GetRequestedSize())
    return kIgnoreObjectInvalid;
  if (metadata->GetLsanTag() == kIgnored)
    return kIgnoreObjectAlreadyIgnored;

  metadata->SetLsanTag(kIgnored);
  return kIgnoreObjectSuccess;
}

}  // namespace __lsan

using namespace __hwasan;

void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}

void __hwasan_disable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = AllocationSizeFast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
697