//===-- tsan_mman.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_mman.h"

#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_report.h"
#include "tsan_rtl.h"

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const {}
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
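    // One OS page of meta shadow covers kPageSize bytes of app memory:
    // every kMetaShadowCell app bytes are described by kMetaShadowSize
    // meta bytes.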
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
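    // Example with hypothetical values: for p = 0x10800, size = 0x10000 and
    // kPageSize = 0x2000, the trimming below keeps only the page-aligned
    // subrange [0x12000, 0x20000), i.e. p = 0x12000 and size = 0xE000.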
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

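// The two singletons below live in statically allocated, cache-line-aligned
// buffers and are initialized explicitly, so no static constructors run at
// startup (alignas(64) assumes 64-byte cache lines).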
alignas(64) static char allocator_placeholder[sizeof(Allocator)];
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;
  // For the purposes of deadlock detection, this mutex stands in for all
  // of the internal allocator's mutexes combined. The internal allocator
  // uses multiple mutexes; moreover, they are locked only occasionally,
  // and they are spin mutexes which don't support deadlock detection.
  // So we use this fake mutex as a substitute for them.
  CheckedMutex internal_alloc_mtx;

  GlobalProc()
      : mtx(MutexTypeGlobalProc),
        proc(ProcCreate()),
        internal_alloc_mtx(MutexTypeInternalAlloc) {}
};

alignas(64) static char global_proc_placeholder[sizeof(GlobalProc)];
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

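// Locking and immediately unlocking the fake mutex has no synchronizing
// effect; it merely records an acquisition of internal_alloc_mtx so that
// CheckedMutex can verify the lock order around internal allocator accesses.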
static void InternalAllocAccess() {
  global_proc()->internal_alloc_mtx.Lock();
  global_proc()->internal_alloc_mtx.Unlock();
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

void AllocatorLockBeforeFork() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->internal_alloc_mtx.Lock();
  InternalAllocatorLock();
#if !SANITIZER_APPLE
  // OS X allocates from hooks, see 6a3958247a.
  allocator()->ForceLock();
  StackDepotLockBeforeFork();
#endif
}

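// Mirrors AllocatorLockBeforeFork: the locks are released in the reverse
// order of acquisition.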
void AllocatorUnlockAfterFork(bool child) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
#if !SANITIZER_APPLE
  StackDepotUnlockAfterFork(child);
  allocator()->ForceUnlock();
#endif
  InternalAllocatorUnlock();
  global_proc()->internal_alloc_mtx.Unlock();
}

void GlobalProcessorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Lock();
}

void GlobalProcessorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Unlock();
}

static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
static uptr max_user_defined_malloc_size;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
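  // max_allocation_size_mb is expressed in megabytes, so shifting it left
  // by 20 converts it to bytes; zero means no user-defined limit, in which
  // case the hard cap kMaxAllowedMallocSize applies.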
  max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                     ? common_flags()->max_allocation_size_mb
                                           << 20
                                     : kMaxAllowedMallocSize;
}

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !ShouldReport(thr, ReportTypeSignalUnsafe))
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}

void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize ||
      sz > max_user_defined_malloc_size) {
    if (AllocatorMayReturnNull())
      return nullptr;
    uptr malloc_limit =
        Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportRssLimitExceeded(&stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
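  // The multiplication below cannot overflow: CheckForCallocOverflow
  // verified n * size above.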
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportReallocArrayOverflow(size, n, &stack);
  }
  return user_realloc(thr, pc, p, size * n);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = 0x%zx\n", thr->tid, sz, p);
  // Note: this can run before thread initialization/after finalization.
  // As a result this is not necessarily synchronized with DoReset,
  // which iterates over and resets all sync objects,
  // but it is fine to create new MBlocks in this context.
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  // If this runs before thread initialization/after finalization
  // and we don't have trace initialized, we can't imitate writes.
  // In such a case just reset the shadow range; it is fine since
  // it affects only a small fraction of special objects.
  if (write && thr->ignore_reads_and_writes == 0 &&
      atomic_load_relaxed(&thr->trace_pos))
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  if (!thr->slot) {
    // Very early/late in thread lifetime, or during fork.
    UNUSED uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, false);
    DPrintf("#%d: free(0x%zx, %zu) (no slot)\n", thr->tid, p, sz);
    return;
  }
  SlotLocker locker(thr);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, true);
  DPrintf("#%d: free(0x%zx, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently;
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

static const void *user_alloc_begin(const void *p) {
  if (p == nullptr || !IsAppMem((uptr)p))
    return nullptr;
  void *beg = allocator()->GetBlockBegin(p);
  if (!beg)
    return nullptr;

  MBlock *b = ctx->metamap.GetBlock((uptr)beg);
  if (!b)
    return nullptr;  // Not a valid pointer.

  return (const void *)beg;
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0 || !IsAppMem((uptr)p))
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

uptr user_alloc_usable_size_fast(const void *p) {
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  // Static object constructors may have called malloc before tsan completed
  // initialization, and may believe the returned pointers to be valid.
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  RunFreeHooks(ptr);
}

void *Alloc(uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void FreeImpl(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

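// TSan does not track these statistics; the next two functions return
// placeholder values.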
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

const void *__sanitizer_get_allocated_begin(const void *p) {
  return user_alloc_begin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = user_alloc_usable_size_fast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() {
  allocator()->ForceReleaseToOS();
}

void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"