//===-- tsan_trace_test.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_trace.h"

#include <pthread.h>

#include "gtest/gtest.h"
#include "tsan_rtl.h"

#if !defined(__x86_64__)
// These tests are currently crashing on ppc64:
// https://reviews.llvm.org/D110546#3025422
// due to the way we create thread contexts.
// There must be some difference in thread initialization
// between normal execution and unit tests.
#  define TRACE_TEST(SUITE, NAME) TEST(SUITE, DISABLED_##NAME)
#else
#  define TRACE_TEST(SUITE, NAME) TEST(SUITE, NAME)
#endif

namespace __tsan {

// We need to run all trace tests in a new thread,
// so that the thread trace is empty initially.
template <uptr N>
struct ThreadArray {
  ThreadArray() {
    for (auto *&thr : threads) {
      thr = static_cast<ThreadState *>(
          MmapOrDie(sizeof(ThreadState), "ThreadState"));
      Tid tid = ThreadCreate(cur_thread(), 0, 0, true);
      Processor *proc = ProcCreate();
      ProcWire(proc, thr);
      ThreadStart(thr, tid, 0, ThreadType::Fiber);
    }
  }

  ~ThreadArray() {
    for (uptr i = 0; i < N; i++) {
      if (threads[i])
        Finish(i);
    }
  }

  void Finish(uptr i) {
    auto *thr = threads[i];
    threads[i] = nullptr;
    Processor *proc = thr->proc();
    ThreadFinish(thr);
    ProcUnwire(proc, thr);
    ProcDestroy(proc);
    UnmapOrDie(thr, sizeof(ThreadState));
  }

  ThreadState *threads[N];
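  // Convenience accessors so that a ThreadArray<1> can be passed directly
  // to functions expecting a ThreadState*.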
  ThreadState *operator[](uptr i) { return threads[i]; }
  ThreadState *operator->() { return threads[0]; }
  operator ThreadState *() { return threads[0]; }
};

TRACE_TEST(Trace, RestoreAccess) {
  // A basic test with some function entry/exit events,
  // some mutex lock/unlock events and some other distracting
  // memory events.
  ThreadArray<1> thr;
  TraceFunc(thr, 0x1000);
  TraceFunc(thr, 0x1001);
  TraceMutexLock(thr, EventType::kLock, 0x4000, 0x5000, 0x6000);
  TraceMutexLock(thr, EventType::kLock, 0x4001, 0x5001, 0x6001);
  TraceMutexUnlock(thr, 0x5000);
  TraceFunc(thr);
  CHECK(TryTraceMemoryAccess(thr, 0x2001, 0x3001, 8, kAccessRead));
  TraceMutexLock(thr, EventType::kRLock, 0x4002, 0x5002, 0x6002);
  TraceFunc(thr, 0x1002);
  CHECK(TryTraceMemoryAccess(thr, 0x2000, 0x3000, 8, kAccessRead));
  // This is the access we want to find.
  // The previous one is equivalent, but RestoreStack must prefer
  // the last of the matching accesses.
  CHECK(TryTraceMemoryAccess(thr, 0x2002, 0x3000, 8, kAccessRead));
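  // Replay the trace. RestoreStack is called with the slot mutex, the thread
  // registry lock and slot_mtx held, matching the locking the runtime uses
  // on the report-generation path.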
  Lock slot_lock(&ctx->slots[static_cast<uptr>(thr->fast_state.sid())].mtx);
  ThreadRegistryLock lock1(&ctx->thread_registry);
  Lock lock2(&ctx->slot_mtx);
  Tid tid = kInvalidTid;
  VarSizeStackTrace stk;
  MutexSet mset;
  uptr tag = kExternalTagNone;
  bool res = RestoreStack(EventType::kAccessExt, thr->fast_state.sid(),
                          thr->fast_state.epoch(), 0x3000, 8, kAccessRead, &tid,
                          &stk, &mset, &tag);
  CHECK(res);
  CHECK_EQ(tid, thr->tid);
  CHECK_EQ(stk.size, 3);
  CHECK_EQ(stk.trace[0], 0x1000);
  CHECK_EQ(stk.trace[1], 0x1002);
  CHECK_EQ(stk.trace[2], 0x2002);
  CHECK_EQ(mset.Size(), 2);
  CHECK_EQ(mset.Get(0).addr, 0x5001);
  CHECK_EQ(mset.Get(0).stack_id, 0x6001);
  CHECK_EQ(mset.Get(0).write, true);
  CHECK_EQ(mset.Get(1).addr, 0x5002);
  CHECK_EQ(mset.Get(1).stack_id, 0x6002);
  CHECK_EQ(mset.Get(1).write, false);
  CHECK_EQ(tag, kExternalTagNone);
}

TRACE_TEST(Trace, MemoryAccessSize) {
  // Test tracing and matching of accesses of different sizes.
  struct Params {
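    // access_size: size of the traced access; offset/size describe the access
    // we ask RestoreStack to match against it; res is the expected result.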
    uptr access_size, offset, size;
    bool res;
  };
  Params tests[] = {
      {1, 0, 1, true},  {4, 0, 2, true},
      {4, 2, 2, true},  {8, 3, 1, true},
      {2, 1, 1, true},  {1, 1, 1, false},
      {8, 5, 4, false}, {4, static_cast<uptr>(-1l), 4, false},
  };
  for (auto params : tests) {
    for (int type = 0; type < 3; type++) {
      ThreadArray<1> thr;
      Printf("access_size=%zu, offset=%zu, size=%zu, res=%d, type=%d\n",
             params.access_size, params.offset, params.size, params.res, type);
      TraceFunc(thr, 0x1000);
      switch (type) {
        case 0:
          // This should emit a compressed event.
          CHECK(TryTraceMemoryAccess(thr, 0x2000, 0x3000, params.access_size,
                                     kAccessRead));
          break;
        case 1:
          // This should emit a full event.
          CHECK(TryTraceMemoryAccess(thr, 0x2000000, 0x3000, params.access_size,
                                     kAccessRead));
          break;
        case 2:
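          // This should emit a range access event.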
          TraceMemoryAccessRange(thr, 0x2000000, 0x3000, params.access_size,
                                 kAccessRead);
          break;
      }
      Lock slot_lock(&ctx->slots[static_cast<uptr>(thr->fast_state.sid())].mtx);
      ThreadRegistryLock lock1(&ctx->thread_registry);
      Lock lock2(&ctx->slot_mtx);
      Tid tid = kInvalidTid;
      VarSizeStackTrace stk;
      MutexSet mset;
      uptr tag = kExternalTagNone;
      bool res =
          RestoreStack(EventType::kAccessExt, thr->fast_state.sid(),
                       thr->fast_state.epoch(), 0x3000 + params.offset,
                       params.size, kAccessRead, &tid, &stk, &mset, &tag);
      CHECK_EQ(res, params.res);
      if (params.res) {
        CHECK_EQ(stk.size, 2);
        CHECK_EQ(stk.trace[0], 0x1000);
        CHECK_EQ(stk.trace[1], type ? 0x2000000 : 0x2000);
      }
    }
  }
}

TRACE_TEST(Trace, RestoreMutexLock) {
  // Check restoration of a mutex lock event.
  ThreadArray<1> thr;
  TraceFunc(thr, 0x1000);
  TraceMutexLock(thr, EventType::kLock, 0x4000, 0x5000, 0x6000);
  TraceMutexLock(thr, EventType::kRLock, 0x4001, 0x5001, 0x6001);
  TraceMutexLock(thr, EventType::kRLock, 0x4002, 0x5001, 0x6002);
  Lock slot_lock(&ctx->slots[static_cast<uptr>(thr->fast_state.sid())].mtx);
  ThreadRegistryLock lock1(&ctx->thread_registry);
  Lock lock2(&ctx->slot_mtx);
  Tid tid = kInvalidTid;
  VarSizeStackTrace stk;
  MutexSet mset;
  uptr tag = kExternalTagNone;
  bool res = RestoreStack(EventType::kLock, thr->fast_state.sid(),
                          thr->fast_state.epoch(), 0x5001, 0, 0, &tid, &stk,
                          &mset, &tag);
  CHECK(res);
  CHECK_EQ(stk.size, 2);
  CHECK_EQ(stk.trace[0], 0x1000);
  CHECK_EQ(stk.trace[1], 0x4002);
  CHECK_EQ(mset.Size(), 2);
  CHECK_EQ(mset.Get(0).addr, 0x5000);
  CHECK_EQ(mset.Get(0).stack_id, 0x6000);
  CHECK_EQ(mset.Get(0).write, true);
  CHECK_EQ(mset.Get(1).addr, 0x5001);
  CHECK_EQ(mset.Get(1).stack_id, 0x6001);
  CHECK_EQ(mset.Get(1).write, false);
}

TRACE_TEST(Trace, MultiPart) {
  // Check replay of a trace with multiple parts.
  ThreadArray<1> thr;
  FuncEntry(thr, 0x1000);
  FuncEntry(thr, 0x2000);
  MutexPreLock(thr, 0x4000, 0x5000, 0);
  MutexPostLock(thr, 0x4000, 0x5000, 0);
  MutexPreLock(thr, 0x4000, 0x5000, 0);
  MutexPostLock(thr, 0x4000, 0x5000, 0);
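  // Emit at least three trace parts' worth of events so that the access we
  // restore below is preceded by several trace part switches.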
  const uptr kEvents = 3 * sizeof(TracePart) / sizeof(Event);
  for (uptr i = 0; i < kEvents; i++) {
    FuncEntry(thr, 0x3000);
    MutexPreLock(thr, 0x4002, 0x5002, 0);
    MutexPostLock(thr, 0x4002, 0x5002, 0);
    MutexUnlock(thr, 0x4003, 0x5002, 0);
    FuncExit(thr);
  }
  FuncEntry(thr, 0x4000);
  TraceMutexLock(thr, EventType::kRLock, 0x4001, 0x5001, 0x6001);
  CHECK(TryTraceMemoryAccess(thr, 0x2002, 0x3000, 8, kAccessRead));
  Lock slot_lock(&ctx->slots[static_cast<uptr>(thr->fast_state.sid())].mtx);
  ThreadRegistryLock lock1(&ctx->thread_registry);
  Lock lock2(&ctx->slot_mtx);
  Tid tid = kInvalidTid;
  VarSizeStackTrace stk;
  MutexSet mset;
  uptr tag = kExternalTagNone;
  bool res = RestoreStack(EventType::kAccessExt, thr->fast_state.sid(),
                          thr->fast_state.epoch(), 0x3000, 8, kAccessRead, &tid,
                          &stk, &mset, &tag);
  CHECK(res);
  CHECK_EQ(tid, thr->tid);
  CHECK_EQ(stk.size, 4);
  CHECK_EQ(stk.trace[0], 0x1000);
  CHECK_EQ(stk.trace[1], 0x2000);
  CHECK_EQ(stk.trace[2], 0x4000);
  CHECK_EQ(stk.trace[3], 0x2002);
  CHECK_EQ(mset.Size(), 2);
  CHECK_EQ(mset.Get(0).addr, 0x5000);
  CHECK_EQ(mset.Get(0).write, true);
  CHECK_EQ(mset.Get(0).count, 2);
  CHECK_EQ(mset.Get(1).addr, 0x5001);
  CHECK_EQ(mset.Get(1).write, false);
  CHECK_EQ(mset.Get(1).count, 1);
}

TRACE_TEST(Trace, DeepSwitch) {
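  // Stress trace part switching: every outer iteration emits at least a full
  // trace part's worth of lock/unlock events, forcing repeated part switches.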
  ThreadArray<1> thr;
  for (int i = 0; i < 2000; i++) {
    FuncEntry(thr, 0x1000);
    const uptr kEvents = sizeof(TracePart) / sizeof(Event);
    for (uptr i = 0; i < kEvents; i++) {
      TraceMutexLock(thr, EventType::kLock, 0x4000, 0x5000, 0x6000);
      TraceMutexUnlock(thr, 0x5000);
    }
  }
}

void CheckTraceState(uptr count, uptr finished, uptr excess, uptr recycle) {
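  // Compare the global trace part accounting (total allocated parts, recycled
  // parts of finished threads, excess finished parts and the recycle list
  // size) against the expected values.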
  Lock l(&ctx->slot_mtx);
  Printf("CheckTraceState(%zu/%zu, %zu/%zu, %zu/%zu, %zu/%zu)\n",
         ctx->trace_part_total_allocated, count,
         ctx->trace_part_recycle_finished, finished,
         ctx->trace_part_finished_excess, excess,
         ctx->trace_part_recycle.Size(), recycle);
  CHECK_EQ(ctx->trace_part_total_allocated, count);
  CHECK_EQ(ctx->trace_part_recycle_finished, finished);
  CHECK_EQ(ctx->trace_part_finished_excess, excess);
  CHECK_EQ(ctx->trace_part_recycle.Size(), recycle);
}

TRACE_TEST(TraceAlloc, SingleThread) {
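  // Check how trace parts are allocated and recycled while a single thread
  // repeatedly switches parts and then finishes.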
  TraceResetForTesting();
  auto check_thread = [&](ThreadState *thr, uptr size, uptr count,
                          uptr finished, uptr excess, uptr recycle) {
    CHECK_EQ(thr->tctx->trace.parts.Size(), size);
    CheckTraceState(count, finished, excess, recycle);
  };
  ThreadArray<2> threads;
  check_thread(threads[0], 0, 0, 0, 0, 0);
  TraceSwitchPartImpl(threads[0]);
  check_thread(threads[0], 1, 1, 0, 0, 0);
  TraceSwitchPartImpl(threads[0]);
  check_thread(threads[0], 2, 2, 0, 0, 0);
  TraceSwitchPartImpl(threads[0]);
  check_thread(threads[0], 3, 3, 0, 0, 1);
  TraceSwitchPartImpl(threads[0]);
  check_thread(threads[0], 3, 3, 0, 0, 1);
  threads.Finish(0);
  CheckTraceState(3, 3, 0, 3);
  threads.Finish(1);
  CheckTraceState(3, 3, 0, 3);
}

TRACE_TEST(TraceAlloc, FinishedThreadReuse) {
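  // Check that the total number of allocated trace parts stays capped at
  // kFinishedThreadHi + 1: once enough threads have finished, their parts are
  // reused for new threads instead of growing the allocation.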
  TraceResetForTesting();
  constexpr uptr Hi = Trace::kFinishedThreadHi;
  constexpr uptr kThreads = 4 * Hi;
  ThreadArray<kThreads> threads;
  for (uptr i = 0; i < kThreads; i++) {
    Printf("thread %zu\n", i);
    TraceSwitchPartImpl(threads[i]);
    if (i <= Hi)
      CheckTraceState(i + 1, i, 0, i);
    else if (i <= 2 * Hi)
      CheckTraceState(Hi + 1, Hi, i - Hi, Hi);
    else
      CheckTraceState(Hi + 1, Hi, Hi, Hi);
    threads.Finish(i);
    if (i < Hi)
      CheckTraceState(i + 1, i + 1, 0, i + 1);
    else if (i < 2 * Hi)
      CheckTraceState(Hi + 1, Hi + 1, i - Hi + 1, Hi + 1);
    else
      CheckTraceState(Hi + 1, Hi + 1, Hi + 1, Hi + 1);
  }
}

TRACE_TEST(TraceAlloc, FinishedThreadReuse2) {
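  // Similar to FinishedThreadReuse, but each thread switches through
  // 2 * kMinParts parts, so trace parts are also reused within a single live
  // thread, not only across finished threads.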
  TraceResetForTesting();
  // constexpr uptr Lo = Trace::kFinishedThreadLo;
  // constexpr uptr Hi = Trace::kFinishedThreadHi;
  constexpr uptr Min = Trace::kMinParts;
  constexpr uptr kThreads = 10;
  constexpr uptr kParts = 2 * Min;
  ThreadArray<kThreads> threads;
  for (uptr i = 0; i < kThreads; i++) {
    Printf("thread %zu\n", i);
    for (uptr j = 0; j < kParts; j++) TraceSwitchPartImpl(threads[i]);
    if (i == 0)
      CheckTraceState(Min, 0, 0, 1);
    else
      CheckTraceState(2 * Min, 0, Min, Min + 1);
    threads.Finish(i);
    if (i == 0)
      CheckTraceState(Min, Min, 0, Min);
    else
      CheckTraceState(2 * Min, 2 * Min, Min, 2 * Min);
  }
}

}  // namespace __tsan