//===-- tsan_mman.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by the front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
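// Example (hypothetical): a front-end can provide a strong definition of the
// weak hooks above to observe every allocation, e.g.:
//
//   extern "C" void __sanitizer_malloc_hook(void *ptr, uptr size) {
//     RecordAllocation(ptr, size);  // RecordAllocation is a user-supplied helper.
//   }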

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
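    // E.g. with the common 64-bit configuration (kMetaShadowCell == 8,
    // kMetaShadowSize == 4), kMetaRatio == 2: two user pages correspond to
    // one meta page, so trimming the user range to a multiple of two OS
    // pages below guarantees the released meta range covers whole pages.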
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

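// The allocator lives in a statically sized buffer and is constructed lazily
// via placement new, which avoids static-initialization-order issues and any
// heap use before the runtime is ready.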
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;

  GlobalProc()
      : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
      , proc(ProcCreate()) {
  }
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

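// The constructor above leaves gp->mtx locked; the destructor below unlocks
// it, so all proc-less operations are serialized through the single global
// Processor for the duration of the scope.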
ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
}

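// Runs after the internal allocator is usable: GlobalProc's constructor calls
// ProcCreate(), which itself allocates.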
void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}
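// For example (hypothetical), with report_signal_unsafe set, heap operations
// performed inside a signal handler are reported:
//
//   void handler(int) { void *p = malloc(1); free(p); }  // both flagged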

static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;

void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, kMaxAllowedMallocSize, &stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
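  // CheckForCallocOverflow has already verified that n * size cannot overflow.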
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportReallocArrayOverflow(size, n, &stack);
  }
  return user_realloc(thr, pc, p, size * n);
}

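// Imitating a write over the freshly allocated block lets the race detector
// flag unsynchronized accesses that race with the allocation itself.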
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

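// Note: realloc(p, 0) frees p and returns nullptr, matching the historical
// glibc behavior.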
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
  RunFreeHooks(ptr);
}

void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

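// The next two stats are not tracked by the TSan allocator; a dummy nonzero
// value is returned (presumably so callers do not treat it as an error).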
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

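// Releases per-Processor caches back to the global pools; applications may
// call this when a thread is expected to stay idle, to reduce memory use.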
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  thr->clock.ResetCached(&thr->proc()->clock_cache);
  thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"