//===-- tsan_trace.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_TRACE_H
#define TSAN_TRACE_H

#include "tsan_defs.h"
#include "tsan_ilist.h"
#include "tsan_mutexset.h"
#include "tsan_stack_trace.h"

namespace __tsan {

enum class EventType : u64 {
  kAccessExt,
  kAccessRange,
  kLock,
  kRLock,
  kUnlock,
  kTime,
};

// "Base" type for all events, used for type dispatch.
struct Event {
  // We use variable-length type encoding to give more bits to the event
  // types that need them. If is_access is set, this is EventAccess.
  // Otherwise, if is_func is set, this is EventFunc.
  // Otherwise, the type field denotes the event type.
  u64 is_access : 1;
  u64 is_func : 1;
  EventType type : 3;
  u64 _ : 59;
};
static_assert(sizeof(Event) == 8, "bad Event size");
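
// Illustrative only: a minimal sketch of how the variable-length encoding is
// dispatched. This helper is hypothetical and not part of the TSan runtime;
// only the bit layout and the event sizes follow the structs in this file.
inline uptr EventSizeForIllustration(Event ev) {
  if (ev.is_access || ev.is_func)
    return sizeof(Event);  // Compressed EventAccess or EventFunc: one word.
  switch (ev.type) {
    case EventType::kAccessExt:
    case EventType::kAccessRange:
    case EventType::kLock:
    case EventType::kRLock:
      return 2 * sizeof(Event);  // 16-byte events defined below.
    default:
      return sizeof(Event);  // kUnlock and kTime fit in a single word.
  }
}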

// No-op event used as padding; it does not affect state during replay.
static constexpr Event NopEvent = {1, 0, EventType::kAccessExt, 0};

// Compressed memory access event. It can represent an access only when its PC
// is close enough to the previous access's PC; otherwise we fall back to
// EventAccessExt.
struct EventAccess {
  static constexpr uptr kPCBits = 15;
  static_assert(kPCBits + kCompressedAddrBits + 5 == 64,
                "unused bits in EventAccess");

  u64 is_access : 1;  // = 1
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;
  u64 pc_delta : kPCBits;  // signed delta from the previous memory access PC
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventAccess) == 8, "bad EventAccess size");
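
// A sketch of how a signed PC delta could be packed into the unsigned
// pc_delta bitfield: bias the delta by 2^(kPCBits-1) so that small negative
// deltas are representable. These helpers are hypothetical and only
// illustrate the encoding; the real writer lives elsewhere in the runtime.
inline bool TryEncodePCDeltaForIllustration(uptr prev_pc, uptr pc,
                                            u64 *pc_delta) {
  uptr biased = pc - prev_pc + (uptr(1) << (EventAccess::kPCBits - 1));
  if (biased >= (uptr(1) << EventAccess::kPCBits))
    return false;  // PCs too far apart: fall back to EventAccessExt.
  *pc_delta = biased;
  return true;
}
inline uptr DecodePCDeltaForIllustration(uptr prev_pc, u64 pc_delta) {
  return prev_pc + pc_delta - (uptr(1) << (EventAccess::kPCBits - 1));
}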

// Function entry (pc != 0) or exit (pc == 0).
struct EventFunc {
  u64 is_access : 1;  // = 0
  u64 is_func : 1;    // = 1
  u64 pc : 62;
};
static_assert(sizeof(EventFunc) == 8, "bad EventFunc size");

// Extended memory access with full PC.
struct EventAccessExt {
  // Note: precisely specifying the unused parts of the bitfield is critical
  // for performance. If we don't specify them, the compiler will generate
  // code to load the old value and shuffle it to extract the unused bits and
  // apply them to the new value. If we specify the unused part and store 0 in
  // there, all that unnecessary code goes away (the store of the 0 constant
  // is combined with the other constant parts).
  static constexpr uptr kUnusedBits = 11;
  static_assert(kCompressedAddrBits + kUnusedBits + 9 == 64,
                "unused bits in EventAccessExt");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kAccessExt
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;
  u64 _ : kUnusedBits;
  u64 addr : kCompressedAddrBits;
  u64 pc;
};
static_assert(sizeof(EventAccessExt) == 16, "bad EventAccessExt size");
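
// A hypothetical sketch of filling an EventAccessExt, only to illustrate the
// note above: with every bit of the first word assigned, including the
// explicit "ev->_ = 0", the compiler can emit plain constant stores instead
// of a read-modify-write of the unused bits. Not the actual TSan tracer.
inline void FillAccessExtForIllustration(EventAccessExt *ev, uptr addr,
                                         uptr pc, bool is_read) {
  ev->is_access = 0;
  ev->is_func = 0;
  ev->type = EventType::kAccessExt;
  ev->is_read = is_read;
  ev->is_atomic = 0;
  ev->size_log = 0;
  ev->_ = 0;  // Explicitly zero the unused bits (see the note above).
  ev->addr = addr & ((u64(1) << kCompressedAddrBits) - 1);
  ev->pc = pc;
}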

// Access to a memory range.
struct EventAccessRange {
  static constexpr uptr kSizeLoBits = 13;
  static_assert(kCompressedAddrBits + kSizeLoBits + 7 == 64,
                "unused bits in EventAccessRange");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kAccessRange
  u64 is_read : 1;
  u64 is_free : 1;
  u64 size_lo : kSizeLoBits;
  u64 pc : kCompressedAddrBits;
  u64 addr : kCompressedAddrBits;
  u64 size_hi : 64 - kCompressedAddrBits;
};
static_assert(sizeof(EventAccessRange) == 16, "bad EventAccessRange size");
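
// The range size is wider than any single bitfield here, so it is split
// across size_lo/size_hi. A hypothetical sketch of the split and reassembly
// (the helper names are not part of the runtime):
inline void SplitRangeSizeForIllustration(EventAccessRange *ev, u64 size) {
  ev->size_lo = size & ((u64(1) << EventAccessRange::kSizeLoBits) - 1);
  ev->size_hi = size >> EventAccessRange::kSizeLoBits;
}
inline u64 RangeSizeForIllustration(const EventAccessRange &ev) {
  return (u64(ev.size_hi) << EventAccessRange::kSizeLoBits) | ev.size_lo;
}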

// Mutex lock.
struct EventLock {
  static constexpr uptr kStackIDLoBits = 15;
  static constexpr uptr kStackIDHiBits =
      sizeof(StackID) * kByteBits - kStackIDLoBits;
  static constexpr uptr kUnusedBits = 3;
  static_assert(kCompressedAddrBits + kStackIDLoBits + 5 == 64,
                "unused bits in EventLock");
  static_assert(kCompressedAddrBits + kStackIDHiBits + kUnusedBits == 64,
                "unused bits in EventLock");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kLock or EventType::kRLock
  u64 pc : kCompressedAddrBits;
  u64 stack_lo : kStackIDLoBits;
  u64 stack_hi : sizeof(StackID) * kByteBits - kStackIDLoBits;
  u64 _ : kUnusedBits;
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventLock) == 16, "bad EventLock size");
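
// The StackID of the lock site is likewise split across stack_lo/stack_hi.
// A hypothetical reassembly sketch (the helper name is not part of the
// runtime; only the bit widths come from EventLock above):
inline StackID LockStackIDForIllustration(const EventLock &ev) {
  return static_cast<StackID>(
      (u64(ev.stack_hi) << EventLock::kStackIDLoBits) | ev.stack_lo);
}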

// Mutex unlock.
struct EventUnlock {
  static constexpr uptr kUnusedBits = 15;
  static_assert(kCompressedAddrBits + kUnusedBits + 5 == 64,
                "unused bits in EventUnlock");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kUnlock
  u64 _ : kUnusedBits;
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventUnlock) == 8, "bad EventUnlock size");

// Time change event.
struct EventTime {
  static constexpr uptr kUnusedBits = 37;
  static_assert(kUnusedBits + sizeof(Sid) * kByteBits + kEpochBits + 5 == 64,
                "unused bits in EventTime");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kTime
  u64 sid : sizeof(Sid) * kByteBits;
  u64 epoch : kEpochBits;
  u64 _ : kUnusedBits;
};
static_assert(sizeof(EventTime) == 8, "bad EventTime size");

struct Trace;

struct TraceHeader {
  Trace* trace = nullptr;  // back-pointer to Trace containing this part
  INode trace_parts;       // in Trace::parts
  INode global;            // in Context::trace_part_recycle
};

struct TracePart : TraceHeader {
  // There are a lot of goroutines in Go, so we use smaller parts.
  static constexpr uptr kByteSize = (SANITIZER_GO ? 128 : 256) << 10;
  static constexpr uptr kSize =
      (kByteSize - sizeof(TraceHeader)) / sizeof(Event);
  // TraceAcquire does a fast event pointer overflow check by comparing the
  // pointer into TracePart::events against the kAlignment mask. Since
  // TraceParts are allocated page-aligned, this check detects the end of the
  // array (it also has false positives in the middle, which are filtered out
  // separately). This also requires events to be the last field.
  static constexpr uptr kAlignment = 0xff0;
  Event events[kSize];

  TracePart() {}
};
static_assert(sizeof(TracePart) == TracePart::kByteSize, "bad TracePart size");
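
// One way the fast end-of-part check described above can look. This is a
// hypothetical sketch; the exact form used by TraceAcquire (tsan_rtl.h) may
// differ. Because TraceParts are page-aligned and events is the last field,
// a trace position whose low bits vanish under the kAlignment mask can only
// be near the end of the part or at one of the false-positive spots that are
// filtered out separately.
inline bool TracePosAtPartBoundaryForIllustration(Event *pos) {
  return (reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment) == 0;
}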

struct Trace {
  Mutex mtx;
  IList<TraceHeader, &TraceHeader::trace_parts, TracePart> parts;
  // First part that is not queued into ctx->trace_part_recycle.
  TracePart* local_head;
  // Final position in the last part for finished threads.
  Event* final_pos = nullptr;
  // Number of trace parts allocated on behalf of this trace specifically.
  // The total number of parts in this trace can be larger if we retake some
  // parts from other traces.
  uptr parts_allocated = 0;

  Trace() : mtx(MutexTypeTrace) {}

  // We need at least 3 parts per thread, because we want to keep at least
  // 2 parts per thread that are not queued into ctx->trace_part_recycle
  // (the current one being filled and one full part that ensures that
  // we always have at least one part worth of previous memory accesses).
  static constexpr uptr kMinParts = 3;

  static constexpr uptr kFinishedThreadLo = 16;
  static constexpr uptr kFinishedThreadHi = 64;
};

}  // namespace __tsan

#endif  // TSAN_TRACE_H