xref: /llvm-project/compiler-rt/lib/asan/asan_allocator.cpp (revision 6b654a09c2b689438237252d6f0fbcb25c993703)
1 //===-- asan_allocator.cpp ------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of AddressSanitizer, an address sanity checker.
10 //
11 // Implementation of ASan's memory allocator, second version.
12 // This variant uses the allocator from sanitizer_common, i.e. the one shared
13 // with ThreadSanitizer and MemorySanitizer.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "asan_allocator.h"
18 
19 #include "asan_internal.h"
20 #include "asan_mapping.h"
21 #include "asan_poisoning.h"
22 #include "asan_report.h"
23 #include "asan_stack.h"
24 #include "asan_suppressions.h"
25 #include "asan_thread.h"
26 #include "lsan/lsan_common.h"
27 #include "sanitizer_common/sanitizer_allocator_checks.h"
28 #include "sanitizer_common/sanitizer_allocator_interface.h"
29 #include "sanitizer_common/sanitizer_common.h"
30 #include "sanitizer_common/sanitizer_errno.h"
31 #include "sanitizer_common/sanitizer_flags.h"
32 #include "sanitizer_common/sanitizer_internal_defs.h"
33 #include "sanitizer_common/sanitizer_list.h"
34 #include "sanitizer_common/sanitizer_quarantine.h"
35 #include "sanitizer_common/sanitizer_stackdepot.h"
36 
37 namespace __asan {
38 
39 // Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
40 // We use adaptive redzones: for larger allocations, larger redzones are used.
41 static u32 RZLog2Size(u32 rz_log) {
42   CHECK_LT(rz_log, 8);
43   return 16 << rz_log;
44 }
45 
46 static u32 RZSize2Log(u32 rz_size) {
47   CHECK_GE(rz_size, 16);
48   CHECK_LE(rz_size, 2048);
49   CHECK(IsPowerOfTwo(rz_size));
50   u32 res = Log2(rz_size) - 4;
51   CHECK_EQ(rz_size, RZLog2Size(res));
52   return res;
53 }
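// For example, rz_log 0 encodes a 16-byte redzone, 3 encodes 128 bytes, and 7
// encodes 2048 bytes; RZSize2Log is the inverse, e.g. RZSize2Log(128) == 3.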
54 
55 static AsanAllocator &get_allocator();
56 
57 static void AtomicContextStore(volatile atomic_uint64_t *atomic_context,
58                                u32 tid, u32 stack) {
59   u64 context = tid;
60   context <<= 32;
61   context += stack;
62   atomic_store(atomic_context, context, memory_order_relaxed);
63 }
64 
65 static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context,
66                               u32 &tid, u32 &stack) {
67   u64 context = atomic_load(atomic_context, memory_order_relaxed);
68   stack = context;
69   context >>= 32;
70   tid = context;
71 }
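// These two helpers pack (tid, stack) into a single 64-bit value, with the tid
// in the high 32 bits and the stack id in the low 32 bits (e.g. tid 0x5 and
// stack 0x1234 become 0x0000000500001234), so a context can be read or written
// with one relaxed atomic access.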
72 
73 // The memory chunk allocated from the underlying allocator looks like this:
74 // L L L L L L H H U U U U U U R R
75 //   L -- left redzone words (0 or more bytes)
76 //   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
77 //   U -- user memory.
78 //   R -- right redzone (0 or more bytes)
79 // ChunkBase consists of ChunkHeader and other bytes that overlap with user
80 // memory.
81 
82 // If the left redzone is greater than the ChunkHeader size, we store a magic
83 // value in the first uptr word of the memory block and store the address of
84 // the ChunkHeader in the next uptr.
85 // M B L L L L L L L L L  H H U U U U U U
86 //   |                    ^
87 //   ---------------------|
88 //   M -- magic value kAllocBegMagic
89 //   B -- address of ChunkHeader pointing to the first 'H'
90 
91 class ChunkHeader {
92  public:
93   atomic_uint8_t chunk_state;
94   u8 alloc_type : 2;
95   u8 lsan_tag : 2;
96 
97   // align < 8 -> 0
98   // else      -> log2(min(align, 512)) - 2
99   u8 user_requested_alignment_log : 3;
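  // E.g. alignment 8 is encoded as 1, 16 as 2, and 512 (or anything larger,
  // which is clamped) as 7; see ComputeUserRequestedAlignmentLog below.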
100 
101  private:
102   u16 user_requested_size_hi;
103   u32 user_requested_size_lo;
104   atomic_uint64_t alloc_context_id;
105 
106  public:
107   uptr UsedSize() const {
108     static_assert(sizeof(user_requested_size_lo) == 4,
109                   "Expression below requires this");
110     return FIRST_32_SECOND_64(0, ((uptr)user_requested_size_hi << 32)) +
111            user_requested_size_lo;
112   }
113 
114   void SetUsedSize(uptr size) {
115     user_requested_size_lo = size;
116     static_assert(sizeof(user_requested_size_lo) == 4,
117                   "Expression below requires this");
118     user_requested_size_hi = FIRST_32_SECOND_64(0, size >> 32);
119     CHECK_EQ(UsedSize(), size);
120   }
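  // On 64-bit targets the user-requested size is split into a 32-bit low part
  // and a 16-bit high part, so sizes up to 2^48 - 1 fit; on 32-bit targets
  // only user_requested_size_lo is used.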
121 
122   void SetAllocContext(u32 tid, u32 stack) {
123     AtomicContextStore(&alloc_context_id, tid, stack);
124   }
125 
126   void GetAllocContext(u32 &tid, u32 &stack) const {
127     AtomicContextLoad(&alloc_context_id, tid, stack);
128   }
129 };
130 
131 class ChunkBase : public ChunkHeader {
132   atomic_uint64_t free_context_id;
133 
134  public:
135   void SetFreeContext(u32 tid, u32 stack) {
136     AtomicContextStore(&free_context_id, tid, stack);
137   }
138 
139   void GetFreeContext(u32 &tid, u32 &stack) const {
140     AtomicContextLoad(&free_context_id, tid, stack);
141   }
142 };
143 
144 static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
145 static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
146 COMPILER_CHECK(kChunkHeaderSize == 16);
147 COMPILER_CHECK(kChunkHeader2Size <= 16);
148 
149 enum {
150   // Either just allocated by the underlying allocator, but AsanChunk is not
151   // yet ready, or almost returned to the underlying allocator and AsanChunk is
152   // already meaningless.
153   CHUNK_INVALID = 0,
154   // The chunk is allocated and not yet freed.
155   CHUNK_ALLOCATED = 2,
156   // The chunk was freed and put into quarantine zone.
157   CHUNK_QUARANTINE = 3,
158 };
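// A chunk's state, as driven by the code below, roughly follows:
//   CHUNK_INVALID -> CHUNK_ALLOCATED    (end of Allocate())
//   CHUNK_ALLOCATED -> CHUNK_QUARANTINE (AtomicallySetQuarantineFlagIfAllocated)
//   CHUNK_QUARANTINE -> CHUNK_INVALID   (QuarantineCallback::Recycle)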
159 
160 class AsanChunk : public ChunkBase {
161  public:
162   uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
163   bool AddrIsInside(uptr addr) {
164     return (addr >= Beg()) && (addr < Beg() + UsedSize());
165   }
166 };
167 
168 class LargeChunkHeader {
169   static constexpr uptr kAllocBegMagic =
170       FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
171   atomic_uintptr_t magic;
172   AsanChunk *chunk_header;
173 
174  public:
175   AsanChunk *Get() const {
176     return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
177                ? chunk_header
178                : nullptr;
179   }
180 
181   void Set(AsanChunk *p) {
182     if (p) {
183       chunk_header = p;
184       atomic_store(&magic, kAllocBegMagic, memory_order_release);
185       return;
186     }
187 
188     uptr old = kAllocBegMagic;
189     if (!atomic_compare_exchange_strong(&magic, &old, 0,
190                                         memory_order_release)) {
191       CHECK_EQ(old, kAllocBegMagic);
192     }
193   }
194 };
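// When the AsanChunk header does not sit at the very beginning of the block
// returned by the underlying allocator (see "alloc_beg != chunk_beg" in
// Allocate() below), a LargeChunkHeader is written at the block start so that
// GetAsanChunk() can recover the real AsanChunk from the block address.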
195 
196 static void FillChunk(AsanChunk *m) {
197   // FIXME: Use ReleaseMemoryPagesToOS.
198   Flags &fl = *flags();
199 
200   if (fl.max_free_fill_size > 0) {
201     // We have to skip the chunk header; it contains free_context_id.
202     uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
203     if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
204       uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
205       size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
206       REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
207     }
208   }
209 }
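// For example, if max_free_fill_size is set to 128, freeing a 1000-byte chunk
// overwrites the first 128 bytes of the user region past the Header2 overlap
// with free_fill_byte; the overlapping bytes are skipped because they will
// hold free_context_id.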
210 
211 struct QuarantineCallback {
212   QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
213       : cache_(cache),
214         stack_(stack) {
215   }
216 
217   void PreQuarantine(AsanChunk *m) const {
218     FillChunk(m);
219     // Poison the region.
220     PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
221                  kAsanHeapFreeMagic);
222   }
223 
224   void Recycle(AsanChunk *m) const {
225     void *p = get_allocator().GetBlockBegin(m);
226 
227     // The secondary will immediately unpoison and unmap the memory, so this
228     // branch is unnecessary.
229     if (get_allocator().FromPrimary(p)) {
230       if (p != m) {
231         // Clear the magic value, as allocator internals may overwrite the
232         // contents of deallocated chunk, confusing GetAsanChunk lookup.
233         reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
234       }
235 
236       u8 old_chunk_state = CHUNK_QUARANTINE;
237       if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
238                                           CHUNK_INVALID,
239                                           memory_order_acquire)) {
240         CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
241       }
242 
243       PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
244                    kAsanHeapLeftRedzoneMagic);
245     }
246 
247     // Statistics.
248     AsanStats &thread_stats = GetCurrentThreadStats();
249     thread_stats.real_frees++;
250     thread_stats.really_freed += m->UsedSize();
251 
252     get_allocator().Deallocate(cache_, p);
253   }
254 
255   void RecyclePassThrough(AsanChunk *m) const {
256     // Recycle for the secondary will immediately unpoison and unmap the
257     // memory, so quarantine preparation is unnecessary.
258     if (get_allocator().FromPrimary(m)) {
259       // The primary allocation may need pattern fill if enabled.
260       FillChunk(m);
261     }
262     Recycle(m);
263   }
264 
265   void *Allocate(uptr size) const {
266     void *res = get_allocator().Allocate(cache_, size, 1);
267     // TODO(alekseys): Consider making quarantine OOM-friendly.
268     if (UNLIKELY(!res))
269       ReportOutOfMemory(size, stack_);
270     return res;
271   }
272 
273   void Deallocate(void *p) const { get_allocator().Deallocate(cache_, p); }
274 
275  private:
276   AllocatorCache* const cache_;
277   BufferedStackTrace* const stack_;
278 };
279 
280 typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
281 typedef AsanQuarantine::Cache QuarantineCache;
282 
283 void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
284   PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
285   // Statistics.
286   AsanStats &thread_stats = GetCurrentThreadStats();
287   thread_stats.mmaps++;
288   thread_stats.mmaped += size;
289 }
290 
291 void AsanMapUnmapCallback::OnMapSecondary(uptr p, uptr size, uptr user_begin,
292                                           uptr user_size) const {
293   uptr user_end = RoundDownTo(user_begin + user_size, ASAN_SHADOW_GRANULARITY);
294   user_begin = RoundUpTo(user_begin, ASAN_SHADOW_GRANULARITY);
295   // The secondary mapping will be returned to the user immediately, so there
296   // is no value in poisoning it with non-zero just before Allocate() unpoisons
297   // it. Just poison the head/tail regions that are invisible to Allocate().
298   PoisonShadow(p, user_begin - p, kAsanHeapLeftRedzoneMagic);
299   PoisonShadow(user_end, size - (user_end - p), kAsanHeapLeftRedzoneMagic);
300   // Statistics.
301   AsanStats &thread_stats = GetCurrentThreadStats();
302   thread_stats.mmaps++;
303   thread_stats.mmaped += size;
304 }
305 
306 void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
307   PoisonShadow(p, size, 0);
308   // We are about to unmap a chunk of user memory.
309   // Mark the corresponding shadow memory as not needed.
310   FlushUnneededASanShadowMemory(p, size);
311   // Statistics.
312   AsanStats &thread_stats = GetCurrentThreadStats();
313   thread_stats.munmaps++;
314   thread_stats.munmaped += size;
315 }
316 
317 // We cannot use THREADLOCAL because it is not supported on some of the
318 // platforms we care about (OSX 10.6, Android).
319 // static THREADLOCAL AllocatorCache cache;
320 AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
321   CHECK(ms);
322   return &ms->allocator_cache;
323 }
324 
325 QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
326   CHECK(ms);
327   CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
328   return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
329 }
330 
331 void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
332   quarantine_size_mb = f->quarantine_size_mb;
333   thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
334   min_redzone = f->redzone;
335   max_redzone = f->max_redzone;
336   may_return_null = cf->allocator_may_return_null;
337   alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
338   release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
339 }
340 
341 void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
342   f->quarantine_size_mb = quarantine_size_mb;
343   f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
344   f->redzone = min_redzone;
345   f->max_redzone = max_redzone;
346   cf->allocator_may_return_null = may_return_null;
347   f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
348   cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
349 }
350 
351 struct Allocator {
352   static const uptr kMaxAllowedMallocSize =
353       FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
354 
355   AsanAllocator allocator;
356   AsanQuarantine quarantine;
357   StaticSpinMutex fallback_mutex;
358   AllocatorCache fallback_allocator_cache;
359   QuarantineCache fallback_quarantine_cache;
360 
361   uptr max_user_defined_malloc_size;
362 
363   // ------------------- Options --------------------------
364   atomic_uint16_t min_redzone;
365   atomic_uint16_t max_redzone;
366   atomic_uint8_t alloc_dealloc_mismatch;
367 
368   // ------------------- Initialization ------------------------
369   explicit Allocator(LinkerInitialized)
370       : quarantine(LINKER_INITIALIZED),
371         fallback_quarantine_cache(LINKER_INITIALIZED) {}
372 
373   void CheckOptions(const AllocatorOptions &options) const {
374     CHECK_GE(options.min_redzone, 16);
375     CHECK_GE(options.max_redzone, options.min_redzone);
376     CHECK_LE(options.max_redzone, 2048);
377     CHECK(IsPowerOfTwo(options.min_redzone));
378     CHECK(IsPowerOfTwo(options.max_redzone));
379   }
380 
381   void SharedInitCode(const AllocatorOptions &options) {
382     CheckOptions(options);
383     quarantine.Init((uptr)options.quarantine_size_mb << 20,
384                     (uptr)options.thread_local_quarantine_size_kb << 10);
385     atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
386                  memory_order_release);
387     atomic_store(&min_redzone, options.min_redzone, memory_order_release);
388     atomic_store(&max_redzone, options.max_redzone, memory_order_release);
389   }
390 
391   void InitLinkerInitialized(const AllocatorOptions &options) {
392     SetAllocatorMayReturnNull(options.may_return_null);
393     allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
394     SharedInitCode(options);
395     max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
396                                        ? common_flags()->max_allocation_size_mb
397                                              << 20
398                                        : kMaxAllowedMallocSize;
399   }
400 
401   void RePoisonChunk(uptr chunk) {
402     // This could be a user-facing chunk (with redzones), or some internal
403     // housekeeping chunk, like TransferBatch. Start by assuming the former.
404     AsanChunk *ac = GetAsanChunk((void *)chunk);
405     uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk);
406     if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==
407                   CHUNK_ALLOCATED) {
408       uptr beg = ac->Beg();
409       uptr end = ac->Beg() + ac->UsedSize();
410       uptr chunk_end = chunk + allocated_size;
411       if (chunk < beg && beg < end && end <= chunk_end) {
412         // Looks like a valid AsanChunk in use, poison redzones only.
413         PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
414         uptr end_aligned_down = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
415         FastPoisonShadowPartialRightRedzone(
416             end_aligned_down, end - end_aligned_down,
417             chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
418         return;
419       }
420     }
421 
422     // This is either not an AsanChunk, or a freed or quarantined AsanChunk.
423     // In either case, poison everything.
424     PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
425   }
426 
427   void ReInitialize(const AllocatorOptions &options) {
428     SetAllocatorMayReturnNull(options.may_return_null);
429     allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
430     SharedInitCode(options);
431 
432     // Poison the redzones of all existing allocations.
433     if (CanPoisonMemory()) {
434       allocator.ForceLock();
435       allocator.ForEachChunk(
436           [](uptr chunk, void *alloc) {
437             ((Allocator *)alloc)->RePoisonChunk(chunk);
438           },
439           this);
440       allocator.ForceUnlock();
441     }
442   }
443 
444   void GetOptions(AllocatorOptions *options) const {
445     options->quarantine_size_mb = quarantine.GetMaxSize() >> 20;
446     options->thread_local_quarantine_size_kb =
447         quarantine.GetMaxCacheSize() >> 10;
448     options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
449     options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
450     options->may_return_null = AllocatorMayReturnNull();
451     options->alloc_dealloc_mismatch =
452         atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
453     options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
454   }
455 
456   // -------------------- Helper methods. -------------------------
457   uptr ComputeRZLog(uptr user_requested_size) {
458     u32 rz_log = user_requested_size <= 64 - 16            ? 0
459                  : user_requested_size <= 128 - 32         ? 1
460                  : user_requested_size <= 512 - 64         ? 2
461                  : user_requested_size <= 4096 - 128       ? 3
462                  : user_requested_size <= (1 << 14) - 256  ? 4
463                  : user_requested_size <= (1 << 15) - 512  ? 5
464                  : user_requested_size <= (1 << 16) - 1024 ? 6
465                                                            : 7;
466     u32 hdr_log = RZSize2Log(RoundUpToPowerOfTwo(sizeof(ChunkHeader)));
467     u32 min_log = RZSize2Log(atomic_load(&min_redzone, memory_order_acquire));
468     u32 max_log = RZSize2Log(atomic_load(&max_redzone, memory_order_acquire));
469     return Min(Max(rz_log, Max(min_log, hdr_log)), Max(max_log, hdr_log));
470   }
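  // Worked example: a 100-byte request gives rz_log 2 (100 <= 512 - 64), i.e.
  // a 64-byte redzone; with hdr_log == 0 and the default redzone flags of 16
  // and 2048 (assuming the defaults are unchanged), the clamping below leaves
  // rz_log at 2.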
471 
472   static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
473     if (user_requested_alignment < 8)
474       return 0;
475     if (user_requested_alignment > 512)
476       user_requested_alignment = 512;
477     return Log2(user_requested_alignment) - 2;
478   }
479 
480   static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
481     if (user_requested_alignment_log == 0)
482       return 0;
483     return 1LL << (user_requested_alignment_log + 2);
484   }
485 
486   // We have an address between two chunks, and we want to report just one.
487   AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
488                          AsanChunk *right_chunk) {
489     if (!left_chunk)
490       return right_chunk;
491     if (!right_chunk)
492       return left_chunk;
493     // Prefer an allocated chunk over a freed chunk, and a freed chunk
494     // over an available chunk.
495     u8 left_state = atomic_load(&left_chunk->chunk_state, memory_order_relaxed);
496     u8 right_state =
497         atomic_load(&right_chunk->chunk_state, memory_order_relaxed);
498     if (left_state != right_state) {
499       if (left_state == CHUNK_ALLOCATED)
500         return left_chunk;
501       if (right_state == CHUNK_ALLOCATED)
502         return right_chunk;
503       if (left_state == CHUNK_QUARANTINE)
504         return left_chunk;
505       if (right_state == CHUNK_QUARANTINE)
506         return right_chunk;
507     }
508     // Same chunk_state: choose based on offset.
509     sptr l_offset = 0, r_offset = 0;
510     CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
511     CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
512     if (l_offset < r_offset)
513       return left_chunk;
514     return right_chunk;
515   }
516 
517   bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
518     AsanChunk *m = GetAsanChunkByAddr(addr);
519     if (!m) return false;
520     if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
521       return false;
522     if (m->Beg() != addr) return false;
523     AsanThread *t = GetCurrentThread();
524     m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
525     return true;
526   }
527 
528   // -------------------- Allocation/Deallocation routines ---------------
529   void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
530                  AllocType alloc_type, bool can_fill) {
531     if (UNLIKELY(!AsanInited()))
532       AsanInitFromRtl();
533     if (UNLIKELY(IsRssLimitExceeded())) {
534       if (AllocatorMayReturnNull())
535         return nullptr;
536       ReportRssLimitExceeded(stack);
537     }
538     Flags &fl = *flags();
539     CHECK(stack);
540     const uptr min_alignment = ASAN_SHADOW_GRANULARITY;
541     const uptr user_requested_alignment_log =
542         ComputeUserRequestedAlignmentLog(alignment);
543     if (alignment < min_alignment)
544       alignment = min_alignment;
545     if (size == 0) {
546       // We'd be happy to avoid allocating memory for zero-size requests, but
547       // some programs/tests depend on this behavior and assume that malloc
548       // would not return NULL even for zero-size allocations. Moreover, it
549       // looks like operator new should never return NULL, and results of
550       // consecutive "new" calls must be different even if the allocated size
551       // is zero.
552       size = 1;
553     }
554     CHECK(IsPowerOfTwo(alignment));
555     uptr rz_log = ComputeRZLog(size);
556     uptr rz_size = RZLog2Size(rz_log);
557     uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
558     uptr needed_size = rounded_size + rz_size;
559     if (alignment > min_alignment)
560       needed_size += alignment;
561     bool from_primary = PrimaryAllocator::CanAllocate(needed_size, alignment);
562     // If we are allocating from the secondary allocator, there will be no
563     // automatic right redzone, so add the right redzone manually.
564     if (!from_primary)
565       needed_size += rz_size;
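    // E.g. for malloc(100): rz_size is 64, rounded_size is 104 (assuming an
    // 8-byte shadow granularity), so needed_size is 168 for a primary
    // allocation; a secondary allocation would add another 64 bytes for the
    // right redzone.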
566     CHECK(IsAligned(needed_size, min_alignment));
567     if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
568         size > max_user_defined_malloc_size) {
569       if (AllocatorMayReturnNull()) {
570         Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
571                size);
572         return nullptr;
573       }
574       uptr malloc_limit =
575           Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
576       ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack);
577     }
578 
579     AsanThread *t = GetCurrentThread();
580     void *allocated;
581     if (t) {
582       AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
583       allocated = allocator.Allocate(cache, needed_size, 8);
584     } else {
585       SpinMutexLock l(&fallback_mutex);
586       AllocatorCache *cache = &fallback_allocator_cache;
587       allocated = allocator.Allocate(cache, needed_size, 8);
588     }
589     if (UNLIKELY(!allocated)) {
590       SetAllocatorOutOfMemory();
591       if (AllocatorMayReturnNull())
592         return nullptr;
593       ReportOutOfMemory(size, stack);
594     }
595 
596     uptr alloc_beg = reinterpret_cast<uptr>(allocated);
597     uptr alloc_end = alloc_beg + needed_size;
598     uptr user_beg = alloc_beg + rz_size;
599     if (!IsAligned(user_beg, alignment))
600       user_beg = RoundUpTo(user_beg, alignment);
601     uptr user_end = user_beg + size;
602     CHECK_LE(user_end, alloc_end);
603     uptr chunk_beg = user_beg - kChunkHeaderSize;
604     AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
605     m->alloc_type = alloc_type;
606     CHECK(size);
607     m->SetUsedSize(size);
608     m->user_requested_alignment_log = user_requested_alignment_log;
609 
610     m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
611 
612     if (!from_primary || *(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0) {
613       // The allocator provides an unpoisoned chunk. This is possible for the
614       // secondary allocator, or if CanPoisonMemory() was false for some time,
615       // for example, due to flags()->start_disabled. Anyway, poison left and
616       // right of the block before using it for anything else.
617       uptr tail_beg = RoundUpTo(user_end, ASAN_SHADOW_GRANULARITY);
618       uptr tail_end = alloc_beg + allocator.GetActuallyAllocatedSize(allocated);
619       PoisonShadow(alloc_beg, user_beg - alloc_beg, kAsanHeapLeftRedzoneMagic);
620       PoisonShadow(tail_beg, tail_end - tail_beg, kAsanHeapLeftRedzoneMagic);
621     }
622 
623     uptr size_rounded_down_to_granularity =
624         RoundDownTo(size, ASAN_SHADOW_GRANULARITY);
625     // Unpoison the bulk of the memory region.
626     if (size_rounded_down_to_granularity)
627       PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
628     // Deal with the end of the region if size is not aligned to granularity.
629     if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
630       u8 *shadow =
631           (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
632       *shadow = fl.poison_partial ? (size & (ASAN_SHADOW_GRANULARITY - 1)) : 0;
633     }
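    // E.g. for size == 13 with an 8-byte shadow granularity, the first 8 bytes
    // get a zero shadow byte and the last shadow byte is set to 5 (13 & 7), so
    // the trailing 3 bytes of the last granule report as unaddressable.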
634 
635     AsanStats &thread_stats = GetCurrentThreadStats();
636     thread_stats.mallocs++;
637     thread_stats.malloced += size;
638     thread_stats.malloced_redzones += needed_size - size;
639     if (needed_size > SizeClassMap::kMaxSize)
640       thread_stats.malloc_large++;
641     else
642       thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;
643 
644     void *res = reinterpret_cast<void *>(user_beg);
645     if (can_fill && fl.max_malloc_fill_size) {
646       uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
647       REAL(memset)(res, fl.malloc_fill_byte, fill_size);
648     }
649 #if CAN_SANITIZE_LEAKS
650     m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
651                                                  : __lsan::kDirectlyLeaked;
652 #endif
653     // Must be the last mutation of metadata in this function.
654     atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
655     if (alloc_beg != chunk_beg) {
656       CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
657       reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
658     }
659     RunMallocHooks(res, size);
660     return res;
661   }
662 
663   // Set the quarantine flag if the chunk is allocated; report an ASan error
664   // on available or quarantined chunks. Return true on success, false otherwise.
665   bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
666                                               BufferedStackTrace *stack) {
667     u8 old_chunk_state = CHUNK_ALLOCATED;
668     // Flip the chunk_state atomically to avoid race on double-free.
669     if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
670                                         CHUNK_QUARANTINE,
671                                         memory_order_acquire)) {
672       ReportInvalidFree(ptr, old_chunk_state, stack);
673       // It's not safe to push a chunk in quarantine on invalid free.
674       return false;
675     }
676     CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
677     // It was user data.
678     m->SetFreeContext(kInvalidTid, 0);
679     return true;
680   }
681 
682   // Expects the chunk to already be marked as quarantined by using
683   // AtomicallySetQuarantineFlagIfAllocated.
684   void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
685     CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed),
686              CHUNK_QUARANTINE);
687     AsanThread *t = GetCurrentThread();
688     m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));
689 
690     // Push into quarantine.
691     if (t) {
692       AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
693       AllocatorCache *ac = GetAllocatorCache(ms);
694       quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
695                      m->UsedSize());
696     } else {
697       SpinMutexLock l(&fallback_mutex);
698       AllocatorCache *ac = &fallback_allocator_cache;
699       quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
700                      m, m->UsedSize());
701     }
702   }
703 
704   void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
705                   BufferedStackTrace *stack, AllocType alloc_type) {
706     uptr p = reinterpret_cast<uptr>(ptr);
707     if (p == 0) return;
708 
709     uptr chunk_beg = p - kChunkHeaderSize;
710     AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
711 
712     // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
713     // malloc. Don't report an invalid free in this case.
714     if (SANITIZER_WINDOWS &&
715         !get_allocator().PointerIsMine(ptr)) {
716       if (!IsSystemHeapAddress(p))
717         ReportFreeNotMalloced(p, stack);
718       return;
719     }
720 
721     if (RunFreeHooks(ptr)) {
722       // Someone used __sanitizer_ignore_free_hook() and decided that they
723       // didn't want the memory to be freed right now.
724       // When they call free() on this pointer again at a later time, we should
725       // ignore the alloc-type mismatch and allow them to deallocate the pointer
726       // through free(), rather than the initial alloc type.
727       m->alloc_type = FROM_MALLOC;
728       return;
729     }
730 
731     // Must mark the chunk as quarantined before any changes to its metadata.
732     // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
733     if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;
734 
735     if (m->alloc_type != alloc_type) {
736       if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire) &&
737           !IsAllocDeallocMismatchSuppressed(stack)) {
738         ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
739                                 (AllocType)alloc_type);
740       }
741     } else {
742       if (flags()->new_delete_type_mismatch &&
743           (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
744           ((delete_size && delete_size != m->UsedSize()) ||
745            ComputeUserRequestedAlignmentLog(delete_alignment) !=
746                m->user_requested_alignment_log)) {
747         ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
748       }
749     }
750 
751     AsanStats &thread_stats = GetCurrentThreadStats();
752     thread_stats.frees++;
753     thread_stats.freed += m->UsedSize();
754 
755     QuarantineChunk(m, ptr, stack);
756   }
757 
758   void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
759     CHECK(old_ptr && new_size);
760     uptr p = reinterpret_cast<uptr>(old_ptr);
761     uptr chunk_beg = p - kChunkHeaderSize;
762     AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
763 
764     AsanStats &thread_stats = GetCurrentThreadStats();
765     thread_stats.reallocs++;
766     thread_stats.realloced += new_size;
767 
768     void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
769     if (new_ptr) {
770       u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire);
771       if (chunk_state != CHUNK_ALLOCATED)
772         ReportInvalidFree(old_ptr, chunk_state, stack);
773       CHECK_NE(REAL(memcpy), nullptr);
774       uptr memcpy_size = Min(new_size, m->UsedSize());
775       // If realloc() races with free(), we may start copying freed memory.
776       // However, we will report racy double-free later anyway.
777       REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
778       Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
779     }
780     return new_ptr;
781   }
782 
783   void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
784     if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
785       if (AllocatorMayReturnNull())
786         return nullptr;
787       ReportCallocOverflow(nmemb, size, stack);
788     }
789     void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
790     // If the memory comes from the secondary allocator, there is no need to
791     // clear it, as it comes directly from mmap.
792     if (ptr && allocator.FromPrimary(ptr))
793       REAL(memset)(ptr, 0, nmemb * size);
794     return ptr;
795   }
796 
797   void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
798     if (chunk_state == CHUNK_QUARANTINE)
799       ReportDoubleFree((uptr)ptr, stack);
800     else
801       ReportFreeNotMalloced((uptr)ptr, stack);
802   }
803 
804   void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
805     AllocatorCache *ac = GetAllocatorCache(ms);
806     quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
807     allocator.SwallowCache(ac);
808   }
809 
810   // -------------------------- Chunk lookup ----------------------
811 
812   // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
813   // Returns nullptr if AsanChunk is not yet initialized just after
814   // get_allocator().Allocate(), or is being destroyed just before
815   // get_allocator().Deallocate().
816   AsanChunk *GetAsanChunk(void *alloc_beg) {
817     if (!alloc_beg)
818       return nullptr;
819     AsanChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
820     if (!p) {
821       if (!allocator.FromPrimary(alloc_beg))
822         return nullptr;
823       p = reinterpret_cast<AsanChunk *>(alloc_beg);
824     }
825     u8 state = atomic_load(&p->chunk_state, memory_order_relaxed);
826     // This does not guarantee that the chunk is initialized, but for any
827     // other value it is definitely not a valid chunk.
828     if (state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE)
829       return p;
830     return nullptr;
831   }
832 
833   AsanChunk *GetAsanChunkByAddr(uptr p) {
834     void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
835     return GetAsanChunk(alloc_beg);
836   }
837 
838   // Allocator must be locked when this function is called.
839   AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
840     void *alloc_beg =
841         allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
842     return GetAsanChunk(alloc_beg);
843   }
844 
845   uptr AllocationSize(uptr p) {
846     AsanChunk *m = GetAsanChunkByAddr(p);
847     if (!m) return 0;
848     if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
849       return 0;
850     if (m->Beg() != p) return 0;
851     return m->UsedSize();
852   }
853 
854   uptr AllocationSizeFast(uptr p) {
855     return reinterpret_cast<AsanChunk *>(p - kChunkHeaderSize)->UsedSize();
856   }
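  // Unlike AllocationSize(), this fast path performs no ownership or
  // chunk-state checks; callers such as __sanitizer_get_allocated_size_fast()
  // below are expected to pass the beginning of a live allocation.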
857 
858   AsanChunkView FindHeapChunkByAddress(uptr addr) {
859     AsanChunk *m1 = GetAsanChunkByAddr(addr);
860     sptr offset = 0;
861     if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
862       // The address is in the chunk's left redzone, so maybe it is actually
863       // a right buffer overflow from the chunk just before it.
864       // Search a bit backwards to see if there is another chunk.
865       AsanChunk *m2 = nullptr;
866       for (uptr l = 1; l < GetPageSizeCached(); l++) {
867         m2 = GetAsanChunkByAddr(addr - l);
868         if (m2 == m1) continue;  // Still the same chunk.
869         break;
870       }
871       if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
872         m1 = ChooseChunk(addr, m2, m1);
873     }
874     return AsanChunkView(m1);
875   }
876 
877   void Purge(BufferedStackTrace *stack) {
878     AsanThread *t = GetCurrentThread();
879     if (t) {
880       AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
881       quarantine.DrainAndRecycle(GetQuarantineCache(ms),
882                                  QuarantineCallback(GetAllocatorCache(ms),
883                                                     stack));
884     }
885     {
886       SpinMutexLock l(&fallback_mutex);
887       quarantine.DrainAndRecycle(&fallback_quarantine_cache,
888                                  QuarantineCallback(&fallback_allocator_cache,
889                                                     stack));
890     }
891 
892     allocator.ForceReleaseToOS();
893   }
894 
895   void PrintStats() {
896     allocator.PrintStats();
897     quarantine.PrintStats();
898   }
899 
900   void ForceLock() SANITIZER_ACQUIRE(fallback_mutex) {
901     allocator.ForceLock();
902     fallback_mutex.Lock();
903   }
904 
905   void ForceUnlock() SANITIZER_RELEASE(fallback_mutex) {
906     fallback_mutex.Unlock();
907     allocator.ForceUnlock();
908   }
909 };
910 
911 static Allocator instance(LINKER_INITIALIZED);
912 
913 static AsanAllocator &get_allocator() {
914   return instance.allocator;
915 }
916 
917 bool AsanChunkView::IsValid() const {
918   return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) !=
919                        CHUNK_INVALID;
920 }
921 bool AsanChunkView::IsAllocated() const {
922   return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
923                        CHUNK_ALLOCATED;
924 }
925 bool AsanChunkView::IsQuarantined() const {
926   return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
927                        CHUNK_QUARANTINE;
928 }
929 uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
930 uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
931 uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
932 u32 AsanChunkView::UserRequestedAlignment() const {
933   return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
934 }
935 
936 uptr AsanChunkView::AllocTid() const {
937   u32 tid = 0;
938   u32 stack = 0;
939   chunk_->GetAllocContext(tid, stack);
940   return tid;
941 }
942 
943 uptr AsanChunkView::FreeTid() const {
944   if (!IsQuarantined())
945     return kInvalidTid;
946   u32 tid = 0;
947   u32 stack = 0;
948   chunk_->GetFreeContext(tid, stack);
949   return tid;
950 }
951 
952 AllocType AsanChunkView::GetAllocType() const {
953   return (AllocType)chunk_->alloc_type;
954 }
955 
956 u32 AsanChunkView::GetAllocStackId() const {
957   u32 tid = 0;
958   u32 stack = 0;
959   chunk_->GetAllocContext(tid, stack);
960   return stack;
961 }
962 
963 u32 AsanChunkView::GetFreeStackId() const {
964   if (!IsQuarantined())
965     return 0;
966   u32 tid = 0;
967   u32 stack = 0;
968   chunk_->GetFreeContext(tid, stack);
969   return stack;
970 }
971 
972 void InitializeAllocator(const AllocatorOptions &options) {
973   instance.InitLinkerInitialized(options);
974 }
975 
976 void ReInitializeAllocator(const AllocatorOptions &options) {
977   instance.ReInitialize(options);
978 }
979 
980 void GetAllocatorOptions(AllocatorOptions *options) {
981   instance.GetOptions(options);
982 }
983 
984 AsanChunkView FindHeapChunkByAddress(uptr addr) {
985   return instance.FindHeapChunkByAddress(addr);
986 }
987 AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
988   return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
989 }
990 
991 void AsanThreadLocalMallocStorage::CommitBack() {
992   GET_STACK_TRACE_MALLOC;
993   instance.CommitBack(this, &stack);
994 }
995 
996 void PrintInternalAllocatorStats() {
997   instance.PrintStats();
998 }
999 
1000 void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
1001   instance.Deallocate(ptr, 0, 0, stack, alloc_type);
1002 }
1003 
1004 void asan_delete(void *ptr, uptr size, uptr alignment,
1005                  BufferedStackTrace *stack, AllocType alloc_type) {
1006   instance.Deallocate(ptr, size, alignment, stack, alloc_type);
1007 }
1008 
1009 void *asan_malloc(uptr size, BufferedStackTrace *stack) {
1010   return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
1011 }
1012 
1013 void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
1014   return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
1015 }
1016 
1017 void *asan_reallocarray(void *p, uptr nmemb, uptr size,
1018                         BufferedStackTrace *stack) {
1019   if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
1020     errno = errno_ENOMEM;
1021     if (AllocatorMayReturnNull())
1022       return nullptr;
1023     ReportReallocArrayOverflow(nmemb, size, stack);
1024   }
1025   return asan_realloc(p, nmemb * size, stack);
1026 }
1027 
1028 void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
1029   if (!p)
1030     return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
1031   if (size == 0) {
1032     if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
1033       instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
1034       return nullptr;
1035     }
1036     // Allocate a size of 1 if we shouldn't free() on realloc to 0.
1037     size = 1;
1038   }
1039   return SetErrnoOnNull(instance.Reallocate(p, size, stack));
1040 }
1041 
1042 void *asan_valloc(uptr size, BufferedStackTrace *stack) {
1043   return SetErrnoOnNull(
1044       instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
1045 }
1046 
1047 void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
1048   uptr PageSize = GetPageSizeCached();
1049   if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
1050     errno = errno_ENOMEM;
1051     if (AllocatorMayReturnNull())
1052       return nullptr;
1053     ReportPvallocOverflow(size, stack);
1054   }
1055   // pvalloc(0) should allocate one page.
1056   size = size ? RoundUpTo(size, PageSize) : PageSize;
1057   return SetErrnoOnNull(
1058       instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
1059 }
1060 
1061 void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
1062                     AllocType alloc_type) {
1063   if (UNLIKELY(!IsPowerOfTwo(alignment))) {
1064     errno = errno_EINVAL;
1065     if (AllocatorMayReturnNull())
1066       return nullptr;
1067     ReportInvalidAllocationAlignment(alignment, stack);
1068   }
1069   return SetErrnoOnNull(
1070       instance.Allocate(size, alignment, stack, alloc_type, true));
1071 }
1072 
1073 void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
1074   if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
1075     errno = errno_EINVAL;
1076     if (AllocatorMayReturnNull())
1077       return nullptr;
1078     ReportInvalidAlignedAllocAlignment(size, alignment, stack);
1079   }
1080   return SetErrnoOnNull(
1081       instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
1082 }
1083 
1084 int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
1085                         BufferedStackTrace *stack) {
1086   if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
1087     if (AllocatorMayReturnNull())
1088       return errno_EINVAL;
1089     ReportInvalidPosixMemalignAlignment(alignment, stack);
1090   }
1091   void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
1092   if (UNLIKELY(!ptr))
1093     // OOM error is already taken care of by Allocate.
1094     return errno_ENOMEM;
1095   CHECK(IsAligned((uptr)ptr, alignment));
1096   *memptr = ptr;
1097   return 0;
1098 }
1099 
1100 uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
1101   if (!ptr) return 0;
1102   uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
1103   if (flags()->check_malloc_usable_size && (usable_size == 0)) {
1104     GET_STACK_TRACE_FATAL(pc, bp);
1105     ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
1106   }
1107   return usable_size;
1108 }
1109 
1110 uptr asan_mz_size(const void *ptr) {
1111   return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
1112 }
1113 
1114 void asan_mz_force_lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
1115   instance.ForceLock();
1116 }
1117 
1118 void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
1119   instance.ForceUnlock();
1120 }
1121 
1122 }  // namespace __asan
1123 
1124 // --- Implementation of LSan-specific functions --- {{{1
1125 namespace __lsan {
1126 void LockAllocator() {
1127   __asan::get_allocator().ForceLock();
1128 }
1129 
1130 void UnlockAllocator() {
1131   __asan::get_allocator().ForceUnlock();
1132 }
1133 
1134 void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
1135   *begin = (uptr)&__asan::get_allocator();
1136   *end = *begin + sizeof(__asan::get_allocator());
1137 }
1138 
1139 uptr PointsIntoChunk(void *p) {
1140   uptr addr = reinterpret_cast<uptr>(p);
1141   __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
1142   if (!m || atomic_load(&m->chunk_state, memory_order_acquire) !=
1143                 __asan::CHUNK_ALLOCATED)
1144     return 0;
1145   uptr chunk = m->Beg();
1146   if (m->AddrIsInside(addr))
1147     return chunk;
1148   if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
1149     return chunk;
1150   return 0;
1151 }
1152 
1153 uptr GetUserBegin(uptr chunk) {
1154   // FIXME: All use cases provide the chunk address, so
1155   // GetAsanChunkByAddrFastLocked is not needed.
1156   __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
1157   return m ? m->Beg() : 0;
1158 }
1159 
1160 uptr GetUserAddr(uptr chunk) {
1161   return chunk;
1162 }
1163 
1164 LsanMetadata::LsanMetadata(uptr chunk) {
1165   metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
1166                     : nullptr;
1167 }
1168 
1169 bool LsanMetadata::allocated() const {
1170   if (!metadata_)
1171     return false;
1172   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1173   return atomic_load(&m->chunk_state, memory_order_relaxed) ==
1174          __asan::CHUNK_ALLOCATED;
1175 }
1176 
1177 ChunkTag LsanMetadata::tag() const {
1178   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1179   return static_cast<ChunkTag>(m->lsan_tag);
1180 }
1181 
1182 void LsanMetadata::set_tag(ChunkTag value) {
1183   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1184   m->lsan_tag = value;
1185 }
1186 
1187 uptr LsanMetadata::requested_size() const {
1188   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1189   return m->UsedSize();
1190 }
1191 
1192 u32 LsanMetadata::stack_trace_id() const {
1193   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
1194   u32 tid = 0;
1195   u32 stack = 0;
1196   m->GetAllocContext(tid, stack);
1197   return stack;
1198 }
1199 
1200 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
1201   __asan::get_allocator().ForEachChunk(callback, arg);
1202 }
1203 
1204 IgnoreObjectResult IgnoreObject(const void *p) {
1205   uptr addr = reinterpret_cast<uptr>(p);
1206   __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
1207   if (!m ||
1208       (atomic_load(&m->chunk_state, memory_order_acquire) !=
1209        __asan::CHUNK_ALLOCATED) ||
1210       !m->AddrIsInside(addr)) {
1211     return kIgnoreObjectInvalid;
1212   }
1213   if (m->lsan_tag == kIgnored)
1214     return kIgnoreObjectAlreadyIgnored;
1215   m->lsan_tag = __lsan::kIgnored;
1216   return kIgnoreObjectSuccess;
1217 }
1218 
1219 }  // namespace __lsan
1220 
1221 // ---------------------- Interface ---------------- {{{1
1222 using namespace __asan;
1223 
1224 static const void *AllocationBegin(const void *p) {
1225   AsanChunk *m = __asan::instance.GetAsanChunkByAddr((uptr)p);
1226   if (!m)
1227     return nullptr;
1228   if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
1229     return nullptr;
1230   if (m->UsedSize() == 0)
1231     return nullptr;
1232   return (const void *)(m->Beg());
1233 }
1234 
1235 // The ASan allocator doesn't reserve extra bytes, so normally we would
1236 // just return "size". We don't want to expose our redzone sizes, etc. here.
1237 uptr __sanitizer_get_estimated_allocated_size(uptr size) {
1238   return size;
1239 }
1240 
1241 int __sanitizer_get_ownership(const void *p) {
1242   uptr ptr = reinterpret_cast<uptr>(p);
1243   return instance.AllocationSize(ptr) > 0;
1244 }
1245 
1246 uptr __sanitizer_get_allocated_size(const void *p) {
1247   if (!p) return 0;
1248   uptr ptr = reinterpret_cast<uptr>(p);
1249   uptr allocated_size = instance.AllocationSize(ptr);
1250   // Die if p is not malloced or if it is already freed.
1251   if (allocated_size == 0) {
1252     GET_STACK_TRACE_FATAL_HERE;
1253     ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
1254   }
1255   return allocated_size;
1256 }
1257 
1258 uptr __sanitizer_get_allocated_size_fast(const void *p) {
1259   DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
1260   uptr ret = instance.AllocationSizeFast(reinterpret_cast<uptr>(p));
1261   DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
1262   return ret;
1263 }
1264 
1265 const void *__sanitizer_get_allocated_begin(const void *p) {
1266   return AllocationBegin(p);
1267 }
1268 
1269 void __sanitizer_purge_allocator() {
1270   GET_STACK_TRACE_MALLOC;
1271   instance.Purge(&stack);
1272 }
1273 
1274 int __asan_update_allocation_context(void* addr) {
1275   GET_STACK_TRACE_MALLOC;
1276   return instance.UpdateAllocationStack((uptr)addr, &stack);
1277 }
1278