//===-- tsan_sync.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);

SyncVar::SyncVar()
    : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
  Reset(0);
}

void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
  this->addr = addr;
  this->uid = uid;
  this->next = 0;

  creation_stack_id = 0;
  if (!SANITIZER_GO)  // Go does not use them
    creation_stack_id = CurrentStackId(thr, pc);
  if (common_flags()->detect_deadlocks)
    DDMutexInit(thr, pc, this);
}

void SyncVar::Reset(Processor *proc) {
  uid = 0;
  creation_stack_id = 0;
  owner_tid = kInvalidTid;
  last_lock = 0;
  recursion = 0;
  atomic_store_relaxed(&flags, 0);

  if (proc == 0) {
    CHECK_EQ(clock.size(), 0);
    CHECK_EQ(read_clock.size(), 0);
  } else {
    clock.Reset(&proc->clock_cache);
    read_clock.Reset(&proc->clock_cache);
  }
}

MetaMap::MetaMap()
    : block_alloc_("heap block allocator")
    , sync_alloc_("sync allocator") {
  atomic_store(&uid_gen_, 0, memory_order_relaxed);
}

void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 idx = block_alloc_.Alloc(&thr->proc()->block_cache);
  MBlock *b = block_alloc_.Map(idx);
  b->siz = sz;
  b->tag = 0;
  b->tid = thr->tid;
  b->stk = CurrentStackId(thr, pc);
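  // Publish the block index into the meta shadow cell for p;
  // the cell must not hold any meta objects yet.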
  u32 *meta = MemToMeta(p);
  DCHECK_EQ(*meta, 0);
  *meta = idx | kFlagBlock;
}

uptr MetaMap::FreeBlock(Processor *proc, uptr p) {
  MBlock* b = GetBlock(p);
  if (b == 0)
    return 0;
  uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
  FreeRange(proc, p, sz);
  return sz;
}

// FreeRange frees all heap blocks and sync objects whose meta cells fall
// into the given range.
bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
  bool has_something = false;
  u32 *meta = MemToMeta(p);
  u32 *end = MemToMeta(p + sz);
  if (end == meta)
    end++;
  for (; meta < end; meta++) {
    u32 idx = *meta;
    if (idx == 0) {
      // Note: don't write to meta in this case -- the block can be huge.
      continue;
    }
    *meta = 0;
    has_something = true;
    while (idx != 0) {
      if (idx & kFlagBlock) {
        block_alloc_.Free(&proc->block_cache, idx & ~kFlagMask);
        break;
      } else if (idx & kFlagSync) {
        DCHECK(idx & kFlagSync);
        SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
        u32 next = s->next;
        s->Reset(proc);
        sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask);
        idx = next;
      } else {
        CHECK(0);
      }
    }
  }
  return has_something;
}

// ResetRange removes all meta objects from the range.
// It is called for large mmap-ed regions. The function is best-effort with
// respect to freeing of meta objects, because we don't want to page in the
// whole range, which can be huge. The function probes pages one-by-one until
// it finds a page without meta objects; at that point it stops freeing.
// Because thread stacks grow top-down, we do the same starting from the end
// as well.
void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
  if (SANITIZER_GO) {
    // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
    // so we do the optimization only for C/C++.
    FreeRange(proc, p, sz);
    return;
  }
  const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
  const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
  if (sz <= 4 * kPageSize) {
    // If the range is small, just do the normal free procedure.
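    // (An application range of at most 4 * kPageSize bytes maps to roughly
    // four OS pages of meta shadow, so walking it eagerly is cheap.)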
    FreeRange(proc, p, sz);
    return;
  }
  // First, round both ends of the range to page size.
  uptr diff = RoundUp(p, kPageSize) - p;
  if (diff != 0) {
    FreeRange(proc, p, diff);
    p += diff;
    sz -= diff;
  }
  diff = p + sz - RoundDown(p + sz, kPageSize);
  if (diff != 0) {
    FreeRange(proc, p + sz - diff, diff);
    sz -= diff;
  }
  // Now we must have a non-empty page-aligned range.
  CHECK_GT(sz, 0);
  CHECK_EQ(p, RoundUp(p, kPageSize));
  CHECK_EQ(sz, RoundUp(sz, kPageSize));
  const uptr p0 = p;
  const uptr sz0 = sz;
  // Probe start of the range.
  for (uptr checked = 0; sz > 0; checked += kPageSize) {
    bool has_something = FreeRange(proc, p, kPageSize);
    p += kPageSize;
    sz -= kPageSize;
    if (!has_something && checked > (128 << 10))
      break;
  }
  // Probe end of the range.
  for (uptr checked = 0; sz > 0; checked += kPageSize) {
    bool has_something = FreeRange(proc, p + sz - kPageSize, kPageSize);
    sz -= kPageSize;
    // Stacks grow down, so sync objects are most likely at the end of the
    // region (if it is a stack). The very end of the stack is TLS and tsan
    // increases TLS by at least 256K, so check at least 512K.
    if (!has_something && checked > (512 << 10))
      break;
  }
  // Finally, page out the whole range (including the parts that we've just
  // freed). Note: we can't simply madvise, because we need to leave a zeroed
  // range (otherwise __tsan_java_move can crash if it encounters left-over
  // meta objects in the java heap).
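  // The meta shadow for [p0, p0+sz0) occupies sz0 / kMetaRatio bytes:
  // kMetaShadowSize bytes of meta per kMetaShadowCell bytes of app memory.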
  uptr metap = (uptr)MemToMeta(p0);
  uptr metasz = sz0 / kMetaRatio;
  UnmapOrDie((void*)metap, metasz);
  if (!MmapFixedSuperNoReserve(metap, metasz))
    Die();
}

MBlock* MetaMap::GetBlock(uptr p) {
  u32 *meta = MemToMeta(p);
  u32 idx = *meta;
  for (;;) {
    if (idx == 0)
      return 0;
    if (idx & kFlagBlock)
      return block_alloc_.Map(idx & ~kFlagMask);
    DCHECK(idx & kFlagSync);
    SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
    idx = s->next;
  }
}

SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* MetaMap::GetIfExistsAndLock(uptr addr, bool write_lock) {
  return GetAndLock(0, 0, addr, write_lock, false);
}

// GetAndLock scans the chain hanging off the meta cell for a sync object
// with the given address. If create is set and none is found, it allocates
// a new one and links it into the chain. The returned object is locked
// (read or write) as requested.
SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
  u32 *meta = MemToMeta(addr);
  u32 idx0 = *meta;
  u32 myidx = 0;
  SyncVar *mys = 0;
  for (;;) {
    u32 idx = idx0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock)
        break;
      DCHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      if (s->addr == addr) {
        if (myidx != 0) {
          // Another thread created the sync object first; return our
          // speculative allocation to the cache.
          mys->Reset(thr->proc());
          sync_alloc_.Free(&thr->proc()->sync_cache, myidx);
        }
        if (write_lock)
          s->mtx.Lock();
        else
          s->mtx.ReadLock();
        return s;
      }
      idx = s->next;
    }
    if (!create)
      return 0;
    if (*meta != idx0) {
      idx0 = *meta;
      continue;
    }

    if (myidx == 0) {
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      myidx = sync_alloc_.Alloc(&thr->proc()->sync_cache);
      mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, uid);
    }
    mys->next = idx0;
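    // Try to install the new sync object at the head of the chain with a
    // release CAS. On failure some other thread updated the meta cell
    // concurrently; idx0 now holds the fresh value, so retry the lookup.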
    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
        myidx | kFlagSync, memory_order_release)) {
      if (write_lock)
        mys->mtx.Lock();
      else
        mys->mtx.ReadLock();
      return mys;
    }
  }
}

void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // src and dst can overlap; the caller must guarantee that there are no
  // concurrent accesses to the regions (e.g. stop-the-world).
  CHECK_NE(src, dst);
  CHECK_NE(sz, 0);
  uptr diff = dst - src;
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
  uptr inc = 1;
  if (dst > src) {
    // Copy backwards so that an overlapping destination is not clobbered.
    src_meta = MemToMeta(src + sz) - 1;
    dst_meta = MemToMeta(dst + sz) - 1;
    src_meta_end = MemToMeta(src) - 1;
    inc = -1;
  }
  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
    CHECK_EQ(*dst_meta, 0);
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}

void MetaMap::OnProcIdle(Processor *proc) {
  block_alloc_.FlushCache(&proc->block_cache);
  sync_alloc_.FlushCache(&proc->sync_cache);
}

}  // namespace __tsan