//===-- tsan_sync.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);

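// The constructor passes proc == 0 to Reset(): at construction time the
// vector clocks are known to be empty, so there is no clock cache to return
// them to (see the CHECKs in SyncVar::Reset below).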
SyncVar::SyncVar() : mtx(MutexTypeSyncVar) { Reset(0); }

void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
  this->addr = addr;
  this->uid = uid;
  this->next = 0;

  creation_stack_id = 0;
  if (!SANITIZER_GO)  // Go does not use them
    creation_stack_id = CurrentStackId(thr, pc);
  if (common_flags()->detect_deadlocks)
    DDMutexInit(thr, pc, this);
}

void SyncVar::Reset(Processor *proc) {
  uid = 0;
  creation_stack_id = 0;
  owner_tid = kInvalidTid;
  last_lock = 0;
  recursion = 0;
  atomic_store_relaxed(&flags, 0);

  if (proc == 0) {
    CHECK_EQ(clock.size(), 0);
    CHECK_EQ(read_clock.size(), 0);
  } else {
    clock.Reset(&proc->clock_cache);
    read_clock.Reset(&proc->clock_cache);
  }
}

MetaMap::MetaMap()
    : block_alloc_(LINKER_INITIALIZED, "heap block allocator"),
      sync_alloc_(LINKER_INITIALIZED, "sync allocator") {
  atomic_store(&uid_gen_, 0, memory_order_relaxed);
}

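// Each meta shadow cell (a u32 covering kMetaShadowCell bytes of application
// memory) stores an allocator index tagged with a flag:
//   0                 -> no meta objects for this cell
//   idx | kFlagBlock  -> MBlock describing the heap block
//   idx | kFlagSync   -> head of a list of SyncVar objects chained via
//                        SyncVar::next (the chain may terminate with a
//                        kFlagBlock entry for the enclosing block).
// AllocBlock records the allocation site and publishes the block index in
// the meta shadow; the cell is expected to be empty at this point.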
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 idx = block_alloc_.Alloc(&thr->proc()->block_cache);
  MBlock *b = block_alloc_.Map(idx);
  b->siz = sz;
  b->tag = 0;
  b->tid = thr->tid;
  b->stk = CurrentStackId(thr, pc);
  u32 *meta = MemToMeta(p);
  DCHECK_EQ(*meta, 0);
  *meta = idx | kFlagBlock;
}

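// Frees the heap block registered at p (together with any sync objects inside
// it) and returns the size that was covered, rounded up to kMetaShadowCell,
// or 0 if no block is registered at p.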
uptr MetaMap::FreeBlock(Processor *proc, uptr p) {
  MBlock* b = GetBlock(p);
  if (b == 0)
    return 0;
  uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
  FreeRange(proc, p, sz);
  return sz;
}

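// Frees all MBlock and SyncVar objects whose meta cells fall into [p, p+sz)
// and returns whether anything was actually freed. A zero-sized range still
// inspects the single cell covering p (hence the end == meta adjustment).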
bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
  bool has_something = false;
  u32 *meta = MemToMeta(p);
  u32 *end = MemToMeta(p + sz);
  if (end == meta)
    end++;
  for (; meta < end; meta++) {
    u32 idx = *meta;
    if (idx == 0) {
      // Note: don't write to meta in this case -- the block can be huge.
      continue;
    }
    *meta = 0;
    has_something = true;
    while (idx != 0) {
      if (idx & kFlagBlock) {
        block_alloc_.Free(&proc->block_cache, idx & ~kFlagMask);
        break;
      } else if (idx & kFlagSync) {
        DCHECK(idx & kFlagSync);
        SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
        u32 next = s->next;
        s->Reset(proc);
        sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask);
        idx = next;
      } else {
        CHECK(0);
      }
    }
  }
  return has_something;
}

// ResetRange removes all meta objects from the range.
// It is called for large mmap-ed regions. The function is best-effort wrt
// freeing of meta objects, because we don't want to page in the whole range
// which can be huge. The function probes pages one-by-one until it finds a
// page without meta objects; at that point it stops freeing meta objects.
// Because thread stacks grow top-down, we do the same starting from the end
// as well.
void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
  if (SANITIZER_GO) {
    // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
    // so we do the optimization only for C/C++.
    FreeRange(proc, p, sz);
    return;
  }
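  // One meta shadow byte describes kMetaRatio bytes of application memory,
  // so kPageSize below is the amount of application memory whose meta shadow
  // occupies exactly one OS page; the range is then processed in such steps.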
  const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
  const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
  if (sz <= 4 * kPageSize) {
    // If the range is small, just do the normal free procedure.
    FreeRange(proc, p, sz);
    return;
  }
  // First, round both ends of the range to page size.
  uptr diff = RoundUp(p, kPageSize) - p;
  if (diff != 0) {
    FreeRange(proc, p, diff);
    p += diff;
    sz -= diff;
  }
  diff = p + sz - RoundDown(p + sz, kPageSize);
  if (diff != 0) {
    FreeRange(proc, p + sz - diff, diff);
    sz -= diff;
  }
  // Now we must have a non-empty page-aligned range.
  CHECK_GT(sz, 0);
  CHECK_EQ(p, RoundUp(p, kPageSize));
  CHECK_EQ(sz, RoundUp(sz, kPageSize));
  const uptr p0 = p;
  const uptr sz0 = sz;
  // Probe start of the range.
  for (uptr checked = 0; sz > 0; checked += kPageSize) {
    bool has_something = FreeRange(proc, p, kPageSize);
    p += kPageSize;
    sz -= kPageSize;
    if (!has_something && checked > (128 << 10))
      break;
  }
  // Probe end of the range.
  for (uptr checked = 0; sz > 0; checked += kPageSize) {
    bool has_something = FreeRange(proc, p + sz - kPageSize, kPageSize);
    sz -= kPageSize;
    // Stacks grow down, so sync objects are most likely at the end of the
    // region (if it is a stack). The very end of the stack is TLS and tsan
    // increases TLS by at least 256K, so check at least 512K.
    if (!has_something && checked > (512 << 10))
      break;
  }
  // Finally, page out the whole range (including the parts that we've just
  // freed). Note: we can't simply madvise, because we need to leave a zeroed
  // range (otherwise __tsan_java_move can crash if it encounters left-over
  // meta objects in the Java heap).
  uptr metap = (uptr)MemToMeta(p0);
  uptr metasz = sz0 / kMetaRatio;
  UnmapOrDie((void*)metap, metasz);
  if (!MmapFixedSuperNoReserve(metap, metasz))
    Die();
}

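// Returns the MBlock for the heap block that p belongs to, or 0 if there is
// none; any SyncVar entries chained in front of the block entry are skipped.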
MBlock* MetaMap::GetBlock(uptr p) {
  u32 *meta = MemToMeta(p);
  u32 idx = *meta;
  for (;;) {
    if (idx == 0)
      return 0;
    if (idx & kFlagBlock)
      return block_alloc_.Map(idx & ~kFlagMask);
    DCHECK(idx & kFlagSync);
    SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
    idx = s->next;
  }
}

SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                              uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* MetaMap::GetIfExistsAndLock(uptr addr, bool write_lock) {
  return GetAndLock(0, 0, addr, write_lock, false);
}

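// Looks up the SyncVar for addr in the meta cell's list and returns it locked.
// If it is absent and create is set, a new SyncVar is allocated and prepended
// to the list with a compare-and-swap on the meta cell; if the CAS fails
// (another thread updated the cell), the scan is retried, and a speculatively
// allocated SyncVar is recycled if the address turns up on a later pass.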
SyncVar *MetaMap::GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock,
                             bool create) NO_THREAD_SAFETY_ANALYSIS {
  u32 *meta = MemToMeta(addr);
  u32 idx0 = *meta;
  u32 myidx = 0;
  SyncVar *mys = 0;
  for (;;) {
    u32 idx = idx0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock)
        break;
      DCHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      if (s->addr == addr) {
        if (myidx != 0) {
          mys->Reset(thr->proc());
          sync_alloc_.Free(&thr->proc()->sync_cache, myidx);
        }
        if (write_lock)
          s->mtx.Lock();
        else
          s->mtx.ReadLock();
        return s;
      }
      idx = s->next;
    }
    if (!create)
      return 0;
    if (*meta != idx0) {
      idx0 = *meta;
      continue;
    }

    if (myidx == 0) {
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      myidx = sync_alloc_.Alloc(&thr->proc()->sync_cache);
      mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, uid);
    }
    mys->next = idx0;
    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
        myidx | kFlagSync, memory_order_release)) {
      if (write_lock)
        mys->mtx.Lock();
      else
        mys->mtx.ReadLock();
      return mys;
    }
  }
}

void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // src and dst can overlap; there are no concurrent accesses to the regions
  // (e.g. during stop-the-world).
  CHECK_NE(src, dst);
  CHECK_NE(sz, 0);
  uptr diff = dst - src;
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
  uptr inc = 1;
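  // As in memmove: when moving to a higher address, walk the cells backwards
  // so that overlapping source cells are not overwritten before being copied.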
  if (dst > src) {
    src_meta = MemToMeta(src + sz) - 1;
    dst_meta = MemToMeta(dst + sz) - 1;
    src_meta_end = MemToMeta(src) - 1;
    inc = -1;
  }
  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
    CHECK_EQ(*dst_meta, 0);
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}

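// Called when a Processor goes idle; flushes its locally cached block/sync
// allocator indices back to the global allocators.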
void MetaMap::OnProcIdle(Processor *proc) {
  block_alloc_.FlushCache(&proc->block_cache);
  sync_alloc_.FlushCache(&proc->sync_cache);
}

}  // namespace __tsan