//===-- tsan_mman.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
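    // Trim to the interior page-aligned range. For illustration, assuming
    // kPageSize is 0x1000: unmapping [0x1100, 0x3100) releases only the
    // fully covered pages [0x2000, 0x3000).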
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
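    // Each kMetaRatio bytes of user memory map to one byte of meta shadow,
    // so the meta range to release is size / kMetaRatio bytes long.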
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;
  // This mutex represents the internal allocator combined for
  // the purposes of deadlock detection. The internal allocator
  // uses multiple mutexes; moreover, they are locked only occasionally
  // and they are spin mutexes which don't support deadlock detection.
  // So we use this fake mutex as a substitute for all of them.
  CheckedMutex internal_alloc_mtx;

  GlobalProc()
      : mtx(MutexTypeGlobalProc),
        proc(ProcCreate()),
        internal_alloc_mtx(MutexTypeInternalAlloc) {}
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

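// Touching the fake mutex makes the deadlock detector observe an acquisition
// of MutexTypeInternalAlloc on every internal allocator access.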
static void InternalAllocAccess() {
  global_proc()->internal_alloc_mtx.Lock();
  global_proc()->internal_alloc_mtx.Unlock();
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait for it). Then we would not need the global
  // proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

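// These lock/unlock the allocator as a whole (e.g. around fork). Note the
// ordering: internal_alloc_mtx is acquired before the real internal allocator
// mutexes and released after them.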
void AllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->internal_alloc_mtx.Lock();
  InternalAllocatorLock();
}

void AllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  InternalAllocatorUnlock();
  global_proc()->internal_alloc_mtx.Unlock();
}

void GlobalProcessorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Lock();
}

void GlobalProcessorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Unlock();
}

static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
static uptr max_user_defined_malloc_size;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
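  // If the user set max_allocation_size_mb, convert MiB to bytes; e.g. a
  // value of 2048 yields a 2048 << 20 = 2 GiB cap. Otherwise fall back to
  // the 1 TiB kMaxAllowedMallocSize.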
  max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                     ? common_flags()->max_allocation_size_mb
                                           << 20
                                     : kMaxAllowedMallocSize;
}

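// GlobalProc is constructed lazily with placement new into static storage
// (mirroring allocator_placeholder), which avoids running global constructors
// in the runtime.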
void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !ShouldReport(thr, ReportTypeSignalUnsafe))
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}


void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize ||
      sz > max_user_defined_malloc_size) {
    if (AllocatorMayReturnNull())
      return nullptr;
    uptr malloc_limit =
        Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportRssLimitExceeded(&stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
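  // calloc(n, size) must fail cleanly if the n * size multiplication
  // overflows.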
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportReallocArrayOverflow(size, n, &stack);
  }
  return user_realloc(thr, pc, p, size * n);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = 0x%zx\n", thr->tid, sz, p);
  // Note: this can run before thread initialization/after finalization.
  // As a result this is not necessarily synchronized with DoReset,
  // which iterates over and resets all sync objects,
  // but it is fine to create new MBlocks in this context.
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  // If this runs before thread initialization/after finalization
  // and we don't have the trace initialized, we can't imitate writes.
  // In that case just reset the shadow range; this is fine since
  // it affects only a small fraction of special objects.
  if (write && thr->ignore_reads_and_writes == 0 &&
      atomic_load_relaxed(&thr->trace_pos))
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  if (!thr->slot) {
    // Very early/late in thread lifetime, or during fork.
    UNUSED uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, false);
    DPrintf("#%d: free(0x%zx, %zu) (no slot)\n", thr->tid, p, sz);
    return;
  }
  SlotLocker locker(thr);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, true);
  DPrintf("#%d: free(0x%zx, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently;
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
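  // realloc(p, 0) frees the block and returns nullptr without touching errno.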
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}
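
// Hypothetical usage of user_posix_memalign above:
//   void *p;
//   int res = user_posix_memalign(thr, pc, &p, 64, 1024);
// On success res == 0 and p is 64-byte aligned; on failure it returns
// errno_EINVAL or errno_ENOMEM directly instead of setting errno.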

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0 || !IsAppMem((uptr)p))
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  RunFreeHooks(ptr);
}

void *Alloc(uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void FreeImpl(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

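// Free/unmapped byte statistics are not tracked; these return a placeholder
// value of 1.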
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"