xref: /freebsd-src/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_sync.cpp (revision 349cc55c9796c4596a5b9904cd3281af295f878f)
168d75effSDimitry Andric //===-- tsan_sync.cpp -----------------------------------------------------===//
268d75effSDimitry Andric //
368d75effSDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
468d75effSDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
568d75effSDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
668d75effSDimitry Andric //
768d75effSDimitry Andric //===----------------------------------------------------------------------===//
868d75effSDimitry Andric //
968d75effSDimitry Andric // This file is a part of ThreadSanitizer (TSan), a race detector.
1068d75effSDimitry Andric //
1168d75effSDimitry Andric //===----------------------------------------------------------------------===//
1268d75effSDimitry Andric #include "sanitizer_common/sanitizer_placement_new.h"
1368d75effSDimitry Andric #include "tsan_sync.h"
1468d75effSDimitry Andric #include "tsan_rtl.h"
1568d75effSDimitry Andric #include "tsan_mman.h"
1668d75effSDimitry Andric 
1768d75effSDimitry Andric namespace __tsan {
1868d75effSDimitry Andric 
1968d75effSDimitry Andric void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
2068d75effSDimitry Andric 
21fe6060f1SDimitry Andric SyncVar::SyncVar() : mtx(MutexTypeSyncVar) { Reset(0); }
2268d75effSDimitry Andric 
23*349cc55cSDimitry Andric void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid,
24*349cc55cSDimitry Andric                    bool save_stack) {
2568d75effSDimitry Andric   this->addr = addr;
2668d75effSDimitry Andric   this->uid = uid;
2768d75effSDimitry Andric   this->next = 0;
2868d75effSDimitry Andric 
29*349cc55cSDimitry Andric   creation_stack_id = kInvalidStackID;
30*349cc55cSDimitry Andric   if (save_stack && !SANITIZER_GO)  // Go does not use them
3168d75effSDimitry Andric     creation_stack_id = CurrentStackId(thr, pc);
3268d75effSDimitry Andric   if (common_flags()->detect_deadlocks)
3368d75effSDimitry Andric     DDMutexInit(thr, pc, this);
3468d75effSDimitry Andric }
3568d75effSDimitry Andric 
3668d75effSDimitry Andric void SyncVar::Reset(Processor *proc) {
3768d75effSDimitry Andric   uid = 0;
38*349cc55cSDimitry Andric   creation_stack_id = kInvalidStackID;
3968d75effSDimitry Andric   owner_tid = kInvalidTid;
4068d75effSDimitry Andric   last_lock = 0;
4168d75effSDimitry Andric   recursion = 0;
4268d75effSDimitry Andric   atomic_store_relaxed(&flags, 0);
4368d75effSDimitry Andric 
4468d75effSDimitry Andric   if (proc == 0) {
4568d75effSDimitry Andric     CHECK_EQ(clock.size(), 0);
4668d75effSDimitry Andric     CHECK_EQ(read_clock.size(), 0);
4768d75effSDimitry Andric   } else {
4868d75effSDimitry Andric     clock.Reset(&proc->clock_cache);
4968d75effSDimitry Andric     read_clock.Reset(&proc->clock_cache);
5068d75effSDimitry Andric   }
5168d75effSDimitry Andric }
5268d75effSDimitry Andric 
5368d75effSDimitry Andric MetaMap::MetaMap()
54fe6060f1SDimitry Andric     : block_alloc_(LINKER_INITIALIZED, "heap block allocator"),
55fe6060f1SDimitry Andric       sync_alloc_(LINKER_INITIALIZED, "sync allocator") {
5668d75effSDimitry Andric   atomic_store(&uid_gen_, 0, memory_order_relaxed);
5768d75effSDimitry Andric }
5868d75effSDimitry Andric 
5968d75effSDimitry Andric void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
6068d75effSDimitry Andric   u32 idx = block_alloc_.Alloc(&thr->proc()->block_cache);
6168d75effSDimitry Andric   MBlock *b = block_alloc_.Map(idx);
6268d75effSDimitry Andric   b->siz = sz;
6368d75effSDimitry Andric   b->tag = 0;
6468d75effSDimitry Andric   b->tid = thr->tid;
6568d75effSDimitry Andric   b->stk = CurrentStackId(thr, pc);
6668d75effSDimitry Andric   u32 *meta = MemToMeta(p);
6768d75effSDimitry Andric   DCHECK_EQ(*meta, 0);
6868d75effSDimitry Andric   *meta = idx | kFlagBlock;
6968d75effSDimitry Andric }
7068d75effSDimitry Andric 
7168d75effSDimitry Andric uptr MetaMap::FreeBlock(Processor *proc, uptr p) {
7268d75effSDimitry Andric   MBlock* b = GetBlock(p);
7368d75effSDimitry Andric   if (b == 0)
7468d75effSDimitry Andric     return 0;
7568d75effSDimitry Andric   uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
7668d75effSDimitry Andric   FreeRange(proc, p, sz);
7768d75effSDimitry Andric   return sz;
7868d75effSDimitry Andric }
7968d75effSDimitry Andric 
8068d75effSDimitry Andric bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
8168d75effSDimitry Andric   bool has_something = false;
8268d75effSDimitry Andric   u32 *meta = MemToMeta(p);
8368d75effSDimitry Andric   u32 *end = MemToMeta(p + sz);
8468d75effSDimitry Andric   if (end == meta)
8568d75effSDimitry Andric     end++;
8668d75effSDimitry Andric   for (; meta < end; meta++) {
8768d75effSDimitry Andric     u32 idx = *meta;
8868d75effSDimitry Andric     if (idx == 0) {
8968d75effSDimitry Andric       // Note: don't write to meta in this case -- the block can be huge.
9068d75effSDimitry Andric       continue;
9168d75effSDimitry Andric     }
9268d75effSDimitry Andric     *meta = 0;
9368d75effSDimitry Andric     has_something = true;
9468d75effSDimitry Andric     while (idx != 0) {
9568d75effSDimitry Andric       if (idx & kFlagBlock) {
9668d75effSDimitry Andric         block_alloc_.Free(&proc->block_cache, idx & ~kFlagMask);
9768d75effSDimitry Andric         break;
9868d75effSDimitry Andric       } else if (idx & kFlagSync) {
9968d75effSDimitry Andric         DCHECK(idx & kFlagSync);
10068d75effSDimitry Andric         SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
10168d75effSDimitry Andric         u32 next = s->next;
10268d75effSDimitry Andric         s->Reset(proc);
10368d75effSDimitry Andric         sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask);
10468d75effSDimitry Andric         idx = next;
10568d75effSDimitry Andric       } else {
10668d75effSDimitry Andric         CHECK(0);
10768d75effSDimitry Andric       }
10868d75effSDimitry Andric     }
10968d75effSDimitry Andric   }
11068d75effSDimitry Andric   return has_something;
11168d75effSDimitry Andric }
11268d75effSDimitry Andric 
// ResetRange removes all meta objects from the range.
// It is called for large mmap-ed regions. The function is best-effort wrt
// freeing of meta objects, because we don't want to page in the whole range
// which can be huge. The function probes pages one-by-one until it finds a page
// without meta objects, at this point it stops freeing meta objects. Because
// thread stacks grow top-down, we do the same starting from end as well.
void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
  if (SANITIZER_GO) {
    // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
    // so we do the optimization only for C/C++.
    FreeRange(proc, p, sz);
    return;
  }
  // kPageSize is the amount of application memory whose meta shadow
  // occupies exactly one OS page (one meta byte covers kMetaRatio bytes).
  const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
  const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
  if (sz <= 4 * kPageSize) {
    // If the range is small, just do the normal free procedure.
    FreeRange(proc, p, sz);
    return;
  }
  // First, round both ends of the range to page size.
  uptr diff = RoundUp(p, kPageSize) - p;
  if (diff != 0) {
    // Eagerly free the unaligned head [p, p+diff).
    FreeRange(proc, p, diff);
    p += diff;
    sz -= diff;
  }
  diff = p + sz - RoundDown(p + sz, kPageSize);
  if (diff != 0) {
    // Eagerly free the unaligned tail as well.
    FreeRange(proc, p + sz - diff, diff);
    sz -= diff;
  }
  // Now we must have a non-empty page-aligned range.
  CHECK_GT(sz, 0);
  CHECK_EQ(p, RoundUp(p, kPageSize));
  CHECK_EQ(sz, RoundUp(sz, kPageSize));
  // Remember the aligned range: it is re-mapped wholesale at the end.
  const uptr p0 = p;
  const uptr sz0 = sz;
  // Probe start of the range.
  for (uptr checked = 0; sz > 0; checked += kPageSize) {
    bool has_something = FreeRange(proc, p, kPageSize);
    p += kPageSize;
    sz -= kPageSize;
    // Stop once a probed page had no meta objects, but only after at
    // least 128K has been examined.
    if (!has_something && checked > (128 << 10))
      break;
  }
  // Probe end of the range.
  for (uptr checked = 0; sz > 0; checked += kPageSize) {
    bool has_something = FreeRange(proc, p + sz - kPageSize, kPageSize);
    sz -= kPageSize;
    // Stacks grow down, so sync object are most likely at the end of the region
    // (if it is a stack). The very end of the stack is TLS and tsan increases
    // TLS by at least 256K, so check at least 512K.
    if (!has_something && checked > (512 << 10))
      break;
  }
  // Finally, page out the whole range (including the parts that we've just
  // freed). Note: we can't simply madvise, because we need to leave a zeroed
  // range (otherwise __tsan_java_move can crash if it encounters a left-over
  // meta objects in java heap).
  uptr metap = (uptr)MemToMeta(p0);
  uptr metasz = sz0 / kMetaRatio;
  UnmapOrDie((void*)metap, metasz);
  if (!MmapFixedSuperNoReserve(metap, metasz))
    Die();
}
17968d75effSDimitry Andric 
18068d75effSDimitry Andric MBlock* MetaMap::GetBlock(uptr p) {
18168d75effSDimitry Andric   u32 *meta = MemToMeta(p);
18268d75effSDimitry Andric   u32 idx = *meta;
18368d75effSDimitry Andric   for (;;) {
18468d75effSDimitry Andric     if (idx == 0)
18568d75effSDimitry Andric       return 0;
18668d75effSDimitry Andric     if (idx & kFlagBlock)
18768d75effSDimitry Andric       return block_alloc_.Map(idx & ~kFlagMask);
18868d75effSDimitry Andric     DCHECK(idx & kFlagSync);
18968d75effSDimitry Andric     SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
19068d75effSDimitry Andric     idx = s->next;
19168d75effSDimitry Andric   }
19268d75effSDimitry Andric }
19368d75effSDimitry Andric 
// Lock-free lookup (and optional creation) of the sync object for addr.
// Returns the existing object if one is chained off addr's meta cell;
// otherwise, when create is true, allocates one and publishes it at the
// head of the chain with a CAS, retrying on concurrent modification.
SyncVar *MetaMap::GetSync(ThreadState *thr, uptr pc, uptr addr, bool create,
                          bool save_stack) {
  u32 *meta = MemToMeta(addr);
  u32 idx0 = *meta;
  // Speculatively-allocated candidate, kept alive across CAS retries so we
  // allocate at most once per call.
  u32 myidx = 0;
  SyncVar *mys = nullptr;
  for (;;) {
    // Scan the chain; it ends at 0 or at a heap-block entry.
    for (u32 idx = idx0; idx && !(idx & kFlagBlock);) {
      DCHECK(idx & kFlagSync);
      SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
      if (LIKELY(s->addr == addr)) {
        // Someone else already published an object for addr: discard our
        // speculative allocation, if any, and return theirs.
        if (UNLIKELY(myidx != 0)) {
          mys->Reset(thr->proc());
          sync_alloc_.Free(&thr->proc()->sync_cache, myidx);
        }
        return s;
      }
      idx = s->next;
    }
    if (!create)
      return nullptr;
    // The head moved while we scanned: rescan the updated chain.
    if (UNLIKELY(*meta != idx0)) {
      idx0 = *meta;
      continue;
    }

    if (LIKELY(myidx == 0)) {
      // First attempt: allocate and initialize a new object with a fresh uid.
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      myidx = sync_alloc_.Alloc(&thr->proc()->sync_cache);
      mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, uid, save_stack);
    }
    // Try to publish at the head. Release order makes the initialized
    // object visible to readers of the meta cell. On failure the CAS
    // refreshes idx0 with the current head and we retry.
    mys->next = idx0;
    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
        myidx | kFlagSync, memory_order_release)) {
      return mys;
    }
  }
}
23368d75effSDimitry Andric 
// Relocate all meta objects for range [src, src+sz) to [dst, dst+sz),
// patching the cached addr in each chained sync object.
void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // src and dst can overlap,
  // there are no concurrent accesses to the regions (e.g. stop-the-world).
  CHECK_NE(src, dst);
  CHECK_NE(sz, 0);
  // NOTE: diff relies on unsigned wrap-around when dst < src; adding it to
  // s->addr below still produces the correct relocated address.
  uptr diff = dst - src;
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
  uptr inc = 1;
  if (dst > src) {
    // Moving up: walk backwards so overlapping destination cells are read
    // before they are overwritten.
    src_meta = MemToMeta(src + sz) - 1;
    dst_meta = MemToMeta(dst + sz) - 1;
    src_meta_end = MemToMeta(src) - 1;
    inc = -1;  // unsigned wrap-around makes `+= inc` a decrement
  }
  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
    CHECK_EQ(*dst_meta, 0);  // destination cells must be empty
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;  // block entries terminate the chain and carry no address
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}
26668d75effSDimitry Andric 
// Called when a Processor becomes idle: flush its per-processor allocator
// caches back to the shared allocators so the chunks can be reused.
void MetaMap::OnProcIdle(Processor *proc) {
  block_alloc_.FlushCache(&proc->block_cache);
  sync_alloc_.FlushCache(&proc->sync_cache);
}
27168d75effSDimitry Andric 
272*349cc55cSDimitry Andric MetaMap::MemoryStats MetaMap::GetMemoryStats() const {
273*349cc55cSDimitry Andric   MemoryStats stats;
274*349cc55cSDimitry Andric   stats.mem_block = block_alloc_.AllocatedMemory();
275*349cc55cSDimitry Andric   stats.sync_obj = sync_alloc_.AllocatedMemory();
276*349cc55cSDimitry Andric   return stats;
277*349cc55cSDimitry Andric }
278*349cc55cSDimitry Andric 
27968d75effSDimitry Andric }  // namespace __tsan
280