168d75effSDimitry Andric //===-- tsan_sync.cpp -----------------------------------------------------===//
268d75effSDimitry Andric //
368d75effSDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
468d75effSDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
568d75effSDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
668d75effSDimitry Andric //
768d75effSDimitry Andric //===----------------------------------------------------------------------===//
868d75effSDimitry Andric //
968d75effSDimitry Andric // This file is a part of ThreadSanitizer (TSan), a race detector.
1068d75effSDimitry Andric //
1168d75effSDimitry Andric //===----------------------------------------------------------------------===//
1268d75effSDimitry Andric #include "sanitizer_common/sanitizer_placement_new.h"
1368d75effSDimitry Andric #include "tsan_sync.h"
1468d75effSDimitry Andric #include "tsan_rtl.h"
1568d75effSDimitry Andric #include "tsan_mman.h"
1668d75effSDimitry Andric
1768d75effSDimitry Andric namespace __tsan {
1868d75effSDimitry Andric
1968d75effSDimitry Andric void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
2068d75effSDimitry Andric
// Construct a sync object; all mutable state is set via Reset() so that
// construction and reuse of a recycled object share one code path.
SyncVar::SyncVar() : mtx(MutexTypeSyncVar) { Reset(); }
2268d75effSDimitry Andric
// (Re)initialize this sync object for application address `addr`.
// Optionally records the creation stack and registers the mutex with the
// deadlock detector when it is enabled.
void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, bool save_stack) {
  Reset();
  this->addr = addr;
  next = 0;
  if (save_stack && !SANITIZER_GO)  // Go does not use them
    creation_stack_id = CurrentStackId(thr, pc);
  if (common_flags()->detect_deadlocks)
    DDMutexInit(thr, pc, this);
}
3268d75effSDimitry Andric
// Return the object to a pristine state and release its vector clocks.
// Must not be called while a global state reset is in progress
// (enforced by the ctx->resetting check).
void SyncVar::Reset() {
  CHECK(!ctx->resetting);
  creation_stack_id = kInvalidStackID;
  owner_tid = kInvalidTid;
  last_lock.Reset();
  recursion = 0;
  atomic_store_relaxed(&flags, 0);
  // Free both the write and read vector clocks.
  Free(clock);
  Free(read_clock);
}
4368d75effSDimitry Andric
// The two dense allocators back heap-block descriptors and sync objects;
// the string arguments are only used for allocator naming/reporting.
MetaMap::MetaMap()
    : block_alloc_("heap block allocator"), sync_alloc_("sync allocator") {}
4668d75effSDimitry Andric
AllocBlock(ThreadState * thr,uptr pc,uptr p,uptr sz)4768d75effSDimitry Andric void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
4868d75effSDimitry Andric u32 idx = block_alloc_.Alloc(&thr->proc()->block_cache);
4968d75effSDimitry Andric MBlock *b = block_alloc_.Map(idx);
5068d75effSDimitry Andric b->siz = sz;
5168d75effSDimitry Andric b->tag = 0;
5268d75effSDimitry Andric b->tid = thr->tid;
5368d75effSDimitry Andric b->stk = CurrentStackId(thr, pc);
5468d75effSDimitry Andric u32 *meta = MemToMeta(p);
5568d75effSDimitry Andric DCHECK_EQ(*meta, 0);
5668d75effSDimitry Andric *meta = idx | kFlagBlock;
5768d75effSDimitry Andric }
5868d75effSDimitry Andric
FreeBlock(Processor * proc,uptr p,bool reset)59*0eae32dcSDimitry Andric uptr MetaMap::FreeBlock(Processor *proc, uptr p, bool reset) {
6068d75effSDimitry Andric MBlock* b = GetBlock(p);
6168d75effSDimitry Andric if (b == 0)
6268d75effSDimitry Andric return 0;
6368d75effSDimitry Andric uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
64*0eae32dcSDimitry Andric FreeRange(proc, p, sz, reset);
6568d75effSDimitry Andric return sz;
6668d75effSDimitry Andric }
6768d75effSDimitry Andric
FreeRange(Processor * proc,uptr p,uptr sz,bool reset)68*0eae32dcSDimitry Andric bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz, bool reset) {
6968d75effSDimitry Andric bool has_something = false;
7068d75effSDimitry Andric u32 *meta = MemToMeta(p);
7168d75effSDimitry Andric u32 *end = MemToMeta(p + sz);
7268d75effSDimitry Andric if (end == meta)
7368d75effSDimitry Andric end++;
7468d75effSDimitry Andric for (; meta < end; meta++) {
7568d75effSDimitry Andric u32 idx = *meta;
7668d75effSDimitry Andric if (idx == 0) {
7768d75effSDimitry Andric // Note: don't write to meta in this case -- the block can be huge.
7868d75effSDimitry Andric continue;
7968d75effSDimitry Andric }
8068d75effSDimitry Andric *meta = 0;
8168d75effSDimitry Andric has_something = true;
8268d75effSDimitry Andric while (idx != 0) {
8368d75effSDimitry Andric if (idx & kFlagBlock) {
8468d75effSDimitry Andric block_alloc_.Free(&proc->block_cache, idx & ~kFlagMask);
8568d75effSDimitry Andric break;
8668d75effSDimitry Andric } else if (idx & kFlagSync) {
8768d75effSDimitry Andric DCHECK(idx & kFlagSync);
8868d75effSDimitry Andric SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
8968d75effSDimitry Andric u32 next = s->next;
90*0eae32dcSDimitry Andric if (reset)
91*0eae32dcSDimitry Andric s->Reset();
9268d75effSDimitry Andric sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask);
9368d75effSDimitry Andric idx = next;
9468d75effSDimitry Andric } else {
9568d75effSDimitry Andric CHECK(0);
9668d75effSDimitry Andric }
9768d75effSDimitry Andric }
9868d75effSDimitry Andric }
9968d75effSDimitry Andric return has_something;
10068d75effSDimitry Andric }
10168d75effSDimitry Andric
// ResetRange removes all meta objects from the range.
// It is called for large mmap-ed regions. The function is best-effort wrt
// freeing of meta objects, because we don't want to page in the whole range
// which can be huge. The function probes pages one-by-one until it finds a page
// without meta objects, at this point it stops freeing meta objects. Because
// thread stacks grow top-down, we do the same starting from end as well.
void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz, bool reset) {
  if (SANITIZER_GO) {
    // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
    // so we do the optimization only for C/C++.
    FreeRange(proc, p, sz, reset);
    return;
  }
  // kMetaRatio: application bytes per meta shadow byte; kPageSize is the
  // amount of application memory whose meta shadow occupies one OS page.
  const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
  const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
  if (sz <= 4 * kPageSize) {
    // If the range is small, just do the normal free procedure.
    FreeRange(proc, p, sz, reset);
    return;
  }
  // First, round both ends of the range to page size.
  uptr diff = RoundUp(p, kPageSize) - p;
  if (diff != 0) {
    // Free the unaligned head of the range eagerly.
    FreeRange(proc, p, diff, reset);
    p += diff;
    sz -= diff;
  }
  diff = p + sz - RoundDown(p + sz, kPageSize);
  if (diff != 0) {
    // Free the unaligned tail of the range eagerly.
    FreeRange(proc, p + sz - diff, diff, reset);
    sz -= diff;
  }
  // Now we must have a non-empty page-aligned range.
  CHECK_GT(sz, 0);
  CHECK_EQ(p, RoundUp(p, kPageSize));
  CHECK_EQ(sz, RoundUp(sz, kPageSize));
  const uptr p0 = p;
  const uptr sz0 = sz;
  // Probe start of the range.
  for (uptr checked = 0; sz > 0; checked += kPageSize) {
    bool has_something = FreeRange(proc, p, kPageSize, reset);
    p += kPageSize;
    sz -= kPageSize;
    // Stop once we hit an empty page, but only after the first 128K.
    if (!has_something && checked > (128 << 10))
      break;
  }
  // Probe end of the range.
  for (uptr checked = 0; sz > 0; checked += kPageSize) {
    bool has_something = FreeRange(proc, p + sz - kPageSize, kPageSize, reset);
    sz -= kPageSize;
    // Stacks grow down, so sync object are most likely at the end of the region
    // (if it is a stack). The very end of the stack is TLS and tsan increases
    // TLS by at least 256K, so check at least 512K.
    if (!has_something && checked > (512 << 10))
      break;
  }
  // Finally, page out the whole range (including the parts that we've just
  // freed). Note: we can't simply madvise, because we need to leave a zeroed
  // range (otherwise __tsan_java_move can crash if it encounters a left-over
  // meta objects in java heap).
  uptr metap = (uptr)MemToMeta(p0);
  uptr metasz = sz0 / kMetaRatio;
  UnmapOrDie((void*)metap, metasz);
  if (!MmapFixedSuperNoReserve(metap, metasz))
    Die();
}
16868d75effSDimitry Andric
// Free the write/read vector clocks of every allocated sync object and
// reset their last-lock state.
void MetaMap::ResetClocks() {
  // This can be called from the background thread
  // which does not have proc/cache.
  // The cache is too large for stack.
  static InternalAllocatorCache cache;
  internal_memset(&cache, 0, sizeof(cache));
  internal_allocator()->InitCache(&cache);
  sync_alloc_.ForEach([&](SyncVar *s) {
    if (s->clock) {
      InternalFree(s->clock, &cache);
      s->clock = nullptr;
    }
    if (s->read_clock) {
      InternalFree(s->read_clock, &cache);
      s->read_clock = nullptr;
    }
    s->last_lock.Reset();
  });
  // Return any memory the temporary cache accumulated.
  internal_allocator()->DestroyCache(&cache);
}
189*0eae32dcSDimitry Andric
GetBlock(uptr p)19068d75effSDimitry Andric MBlock* MetaMap::GetBlock(uptr p) {
19168d75effSDimitry Andric u32 *meta = MemToMeta(p);
19268d75effSDimitry Andric u32 idx = *meta;
19368d75effSDimitry Andric for (;;) {
19468d75effSDimitry Andric if (idx == 0)
19568d75effSDimitry Andric return 0;
19668d75effSDimitry Andric if (idx & kFlagBlock)
19768d75effSDimitry Andric return block_alloc_.Map(idx & ~kFlagMask);
19868d75effSDimitry Andric DCHECK(idx & kFlagSync);
19968d75effSDimitry Andric SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
20068d75effSDimitry Andric idx = s->next;
20168d75effSDimitry Andric }
20268d75effSDimitry Andric }
20368d75effSDimitry Andric
// Look up the sync object for `addr`, optionally creating one.
// Lock-free: a new object is published at the head of the meta cell's
// chain with a CAS; if another thread wins the race, the chain is
// re-scanned and our speculatively allocated object is freed or retried.
SyncVar *MetaMap::GetSync(ThreadState *thr, uptr pc, uptr addr, bool create,
                          bool save_stack) {
  DCHECK(!create || thr->slot_locked);
  u32 *meta = MemToMeta(addr);
  u32 idx0 = *meta;
  u32 myidx = 0;  // index of our speculatively allocated object (0 = none)
  SyncVar *mys = nullptr;
  for (;;) {
    // Scan the sync-object chain; it ends at 0 or at a block entry.
    for (u32 idx = idx0; idx && !(idx & kFlagBlock);) {
      DCHECK(idx & kFlagSync);
      SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
      if (LIKELY(s->addr == addr)) {
        if (UNLIKELY(myidx != 0)) {
          // Another thread published an object for addr first; discard ours.
          mys->Reset();
          sync_alloc_.Free(&thr->proc()->sync_cache, myidx);
        }
        return s;
      }
      idx = s->next;
    }
    if (!create)
      return nullptr;
    // The cell changed under us; re-read and re-scan before allocating.
    if (UNLIKELY(*meta != idx0)) {
      idx0 = *meta;
      continue;
    }

    if (LIKELY(myidx == 0)) {
      myidx = sync_alloc_.Alloc(&thr->proc()->sync_cache);
      mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, save_stack);
    }
    // Try to publish the new object at the head of the chain.
    mys->next = idx0;
    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
        myidx | kFlagSync, memory_order_release)) {
      return mys;
    }
    // CAS failed: idx0 now holds the current cell value; loop and re-scan.
  }
}
24368d75effSDimitry Andric
// Move all meta objects for [src, src + sz) to [dst, dst + sz), patching
// the addresses stored in the sync objects along the way.
void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // src and dst can overlap,
  // there are no concurrent accesses to the regions (e.g. stop-the-world).
  CHECK_NE(src, dst);
  CHECK_NE(sz, 0);
  uptr diff = dst - src;
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
  uptr inc = 1;
  if (dst > src) {
    // Moving to a higher address: walk backwards so overlapping source
    // cells are read before they are overwritten.
    src_meta = MemToMeta(src + sz) - 1;
    dst_meta = MemToMeta(dst + sz) - 1;
    src_meta_end = MemToMeta(src) - 1;
    inc = -1;
  }
  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
    CHECK_EQ(*dst_meta, 0);  // destination cells must start empty
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;  // blocks carry no address and terminate the chain
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}
27668d75effSDimitry Andric
// Flush the processor's allocator caches back to the central allocators
// when the processor goes idle.
void MetaMap::OnProcIdle(Processor *proc) {
  block_alloc_.FlushCache(&proc->block_cache);
  sync_alloc_.FlushCache(&proc->sync_cache);
}
28168d75effSDimitry Andric
GetMemoryStats() const282349cc55cSDimitry Andric MetaMap::MemoryStats MetaMap::GetMemoryStats() const {
283349cc55cSDimitry Andric MemoryStats stats;
284349cc55cSDimitry Andric stats.mem_block = block_alloc_.AllocatedMemory();
285349cc55cSDimitry Andric stats.sync_obj = sync_alloc_.AllocatedMemory();
286349cc55cSDimitry Andric return stats;
287349cc55cSDimitry Andric }
288349cc55cSDimitry Andric
28968d75effSDimitry Andric } // namespace __tsan
290