//===-- tsan_sync.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);

SyncVar::SyncVar()
    : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
  Reset(0);
}

void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
  this->addr = addr;
  this->uid = uid;
  this->next = 0;

  creation_stack_id = 0;
  if (!SANITIZER_GO)  // Go does not use them
    creation_stack_id = CurrentStackId(thr, pc);
  if (common_flags()->detect_deadlocks)
    DDMutexInit(thr, pc, this);
}

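// Returns the object to its initial state. When proc is null (only the case
// during construction, before the object is ever published) the vector clocks
// must already be empty, since there is no clock cache to return them to.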
void SyncVar::Reset(Processor *proc) {
  uid = 0;
  creation_stack_id = 0;
  owner_tid = kInvalidTid;
  last_lock = 0;
  recursion = 0;
  atomic_store_relaxed(&flags, 0);

  if (proc == 0) {
    CHECK_EQ(clock.size(), 0);
    CHECK_EQ(read_clock.size(), 0);
  } else {
    clock.Reset(&proc->clock_cache);
    read_clock.Reset(&proc->clock_cache);
  }
}

MetaMap::MetaMap()
    : block_alloc_("heap block allocator")
    , sync_alloc_("sync allocator") {
  atomic_store(&uid_gen_, 0, memory_order_relaxed);
}

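// Each meta shadow cell holds a u32 value: an allocator index tagged with
// kFlagBlock (a heap block) or kFlagSync (a sync object). Multiple sync
// objects mapping to the same cell are chained through SyncVar::next, and the
// chain may end with a block index. For example, a cell with two sync objects
// on top of a heap block forms the chain:
//   *meta -> sync1|kFlagSync -> sync2|kFlagSync -> block|kFlagBlock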
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 idx = block_alloc_.Alloc(&thr->proc()->block_cache);
  MBlock *b = block_alloc_.Map(idx);
  b->siz = sz;
  b->tag = 0;
  b->tid = thr->tid;
  b->stk = CurrentStackId(thr, pc);
  u32 *meta = MemToMeta(p);
  DCHECK_EQ(*meta, 0);
  *meta = idx | kFlagBlock;
}

uptr MetaMap::FreeBlock(Processor *proc, uptr p) {
  MBlock *b = GetBlock(p);
  if (b == 0)
    return 0;
  uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
  FreeRange(proc, p, sz);
  return sz;
}

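// Clears every meta cell in [p, p+sz) and releases the heap block and sync
// objects chained from them. Returns true if any cell was non-empty.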
bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
  bool has_something = false;
  u32 *meta = MemToMeta(p);
  u32 *end = MemToMeta(p + sz);
  if (end == meta)
    end++;
  for (; meta < end; meta++) {
    u32 idx = *meta;
    if (idx == 0) {
      // Note: don't write to meta in this case -- the block can be huge.
      continue;
    }
    *meta = 0;
    has_something = true;
    while (idx != 0) {
      if (idx & kFlagBlock) {
        block_alloc_.Free(&proc->block_cache, idx & ~kFlagMask);
        break;
      } else if (idx & kFlagSync) {
        SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
        u32 next = s->next;
        s->Reset(proc);
        sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask);
        idx = next;
      } else {
        CHECK(0);
      }
    }
  }
  return has_something;
}

// ResetRange removes all meta objects from the range.
// It is called for large mmap-ed regions. The function is best-effort wrt
// freeing of meta objects, because we don't want to page in the whole range
// which can be huge. The function probes pages one-by-one until it finds a
// page without meta objects; at that point it stops freeing meta objects.
// Because thread stacks grow top-down, we do the same starting from the end
// as well.
void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
  if (SANITIZER_GO) {
    // UnmapOrDie/MmapFixedNoReserve do not work on Windows,
    // so we do this optimization only for C/C++.
    FreeRange(proc, p, sz);
    return;
  }
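  // kMetaRatio is how many bytes of application memory one byte of meta
  // shadow covers (in the default layout an 8-byte app cell maps to a 4-byte
  // meta entry, so the ratio is 2). "kPageSize" below is then the amount of
  // application memory whose meta shadow occupies exactly one OS page.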
  const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
  const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
  if (sz <= 4 * kPageSize) {
    // If the range is small, just do the normal free procedure.
    FreeRange(proc, p, sz);
    return;
  }
  // First, round both ends of the range to page size.
  uptr diff = RoundUp(p, kPageSize) - p;
  if (diff != 0) {
    FreeRange(proc, p, diff);
    p += diff;
    sz -= diff;
  }
  diff = p + sz - RoundDown(p + sz, kPageSize);
  if (diff != 0) {
    FreeRange(proc, p + sz - diff, diff);
    sz -= diff;
  }
  // Now we must have a non-empty page-aligned range.
  CHECK_GT(sz, 0);
  CHECK_EQ(p, RoundUp(p, kPageSize));
  CHECK_EQ(sz, RoundUp(sz, kPageSize));
  const uptr p0 = p;
  const uptr sz0 = sz;
  // Probe start of the range.
  for (uptr checked = 0; sz > 0; checked += kPageSize) {
    bool has_something = FreeRange(proc, p, kPageSize);
    p += kPageSize;
    sz -= kPageSize;
    if (!has_something && checked > (128 << 10))
      break;
  }
  // Probe end of the range.
  for (uptr checked = 0; sz > 0; checked += kPageSize) {
    bool has_something = FreeRange(proc, p + sz - kPageSize, kPageSize);
    sz -= kPageSize;
    // Stacks grow down, so sync objects are most likely at the end of the
    // region (if it is a stack). The very end of the stack is TLS, and tsan
    // increases TLS by at least 256K, so check at least 512K.
    if (!has_something && checked > (512 << 10))
      break;
  }
  // Finally, page out the whole range (including the parts that we've just
  // freed). Note: we can't simply madvise, because we need to leave a zeroed
  // range (otherwise __tsan_java_move can crash if it encounters left-over
  // meta objects in the Java heap).
  uptr metap = (uptr)MemToMeta(p0);
  uptr metasz = sz0 / kMetaRatio;
  UnmapOrDie((void*)metap, metasz);
  if (!MmapFixedNoReserve(metap, metasz))
    Die();
}

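// Returns the heap block descriptor for address p, or null if there is none,
// walking past any sync objects chained before the block index.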
MBlock *MetaMap::GetBlock(uptr p) {
  u32 *meta = MemToMeta(p);
  u32 idx = *meta;
  for (;;) {
    if (idx == 0)
      return 0;
    if (idx & kFlagBlock)
      return block_alloc_.Map(idx & ~kFlagMask);
    DCHECK(idx & kFlagSync);
    SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
    idx = s->next;
  }
}

SyncVar *MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar *MetaMap::GetIfExistsAndLock(uptr addr, bool write_lock) {
  return GetAndLock(0, 0, addr, write_lock, false);
}

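// Lock-free lookup/insertion: scan the chain for a SyncVar with a matching
// address; if none is found (and create is set), allocate one, link it at the
// head of the chain and publish it with a compare-and-swap on the meta cell.
// If the CAS fails, re-read the cell and retry, reusing the already-allocated
// object.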
SyncVar *MetaMap::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
  u32 *meta = MemToMeta(addr);
  u32 idx0 = *meta;
  u32 myidx = 0;
  SyncVar *mys = 0;
  for (;;) {
    u32 idx = idx0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock)
        break;
      DCHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      if (s->addr == addr) {
        if (myidx != 0) {
          mys->Reset(thr->proc());
          sync_alloc_.Free(&thr->proc()->sync_cache, myidx);
        }
        if (write_lock)
          s->mtx.Lock();
        else
          s->mtx.ReadLock();
        return s;
      }
      idx = s->next;
    }
    if (!create)
      return 0;
    if (*meta != idx0) {
      idx0 = *meta;
      continue;
    }

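    // Allocate a new sync object at most once per call; if the CAS below
    // fails, it is kept and relinked on the next iteration of the retry loop.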
    if (myidx == 0) {
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      myidx = sync_alloc_.Alloc(&thr->proc()->sync_cache);
      mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, uid);
    }
    mys->next = idx0;
    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
        myidx | kFlagSync, memory_order_release)) {
      if (write_lock)
        mys->mtx.Lock();
      else
        mys->mtx.ReadLock();
      return mys;
    }
  }
}

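// Illustrative use (this mirrors how the runtime's mutex handling code drives
// the map; the exact call site shown here is an assumption, not part of this
// file):
//
//   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr,
//                                                /*write_lock=*/true);
//   // ... inspect/update s (owner_tid, recursion, clocks) ...
//   s->mtx.Unlock();
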
void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // src and dst can overlap; there are no concurrent accesses to the regions
  // (e.g. this is called during stop-the-world).
  CHECK_NE(src, dst);
  CHECK_NE(sz, 0);
  uptr diff = dst - src;
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
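  // Copy in whichever direction does not clobber not-yet-moved entries when
  // the ranges overlap (the same idea as memmove): backwards if dst > src.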
  uptr inc = 1;
  if (dst > src) {
    src_meta = MemToMeta(src + sz) - 1;
    dst_meta = MemToMeta(dst + sz) - 1;
    src_meta_end = MemToMeta(src) - 1;
    inc = -1;
  }
  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
    CHECK_EQ(*dst_meta, 0);
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}

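// Flushes the per-processor allocator caches back to the shared pools so an
// idle processor does not pin freed memory.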
void MetaMap::OnProcIdle(Processor *proc) {
  block_alloc_.FlushCache(&proc->block_cache);
  sync_alloc_.FlushCache(&proc->sync_cache);
}

}  // namespace __tsan