//===-- tsan_trace.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_TRACE_H
#define TSAN_TRACE_H

#include "tsan_defs.h"
#include "tsan_ilist.h"
#include "tsan_mutexset.h"
#include "tsan_stack_trace.h"

namespace __tsan {

enum class EventType : u64 {
  kAccessExt,
  kAccessRange,
  kLock,
  kRLock,
  kUnlock,
  kTime,
};

// "Base" type for all events, used for type dispatch.
struct Event {
  // We use variable-length type encoding to give more bits to some event
  // types that need them. If is_access is set, this is EventAccess.
  // Otherwise, if is_func is set, this is EventFunc.
  // Otherwise type denotes the concrete event type.
  u64 is_access : 1;
  u64 is_func : 1;
  EventType type : 3;
  u64 _ : 59;
};
static_assert(sizeof(Event) == 8, "bad Event size");

// Nop event used as padding; it does not affect state during replay.
static constexpr Event NopEvent = {1, 0, EventType::kAccessExt, 0};

// Compressed memory access event. It can represent only accesses with PCs
// close enough to the previous access PC; otherwise we fall back to
// EventAccessExt.
struct EventAccess {
  static constexpr uptr kPCBits = 15;
  static_assert(kPCBits + kCompressedAddrBits + 5 == 64,
                "unused bits in EventAccess");

  u64 is_access : 1;  // = 1
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;
  u64 pc_delta : kPCBits;  // signed delta from the previous memory access PC
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventAccess) == 8, "bad EventAccess size");
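
// A minimal sketch of the type dispatch described above. The enum and helper
// names (EventKindExample, ClassifyEventExample) are hypothetical and not part
// of the runtime: is_access takes priority, then is_func, and only then does
// Event::type select among the remaining event kinds.
enum class EventKindExample { kCompressedAccess, kFuncEnterExit, kTyped };
inline EventKindExample ClassifyEventExample(const Event &ev) {
  if (ev.is_access)
    return EventKindExample::kCompressedAccess;  // decode as EventAccess
  if (ev.is_func)
    return EventKindExample::kFuncEnterExit;  // decode as EventFunc
  return EventKindExample::kTyped;  // ev.type selects the concrete struct
}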

// Function entry (pc != 0) or exit (pc == 0).
struct EventFunc {
  u64 is_access : 1;  // = 0
  u64 is_func : 1;    // = 1
  u64 pc : 62;
};
static_assert(sizeof(EventFunc) == 8, "bad EventFunc size");

// Extended memory access with full PC.
struct EventAccessExt {
  // Note: precisely specifying the unused parts of the bitfield is critical
  // for performance. If we don't specify them, the compiler will generate code
  // to load the old value and shuffle it to extract the unused bits and apply
  // them to the new value. If we specify the unused part and store 0 in there,
  // all that unnecessary code goes away (the store of the 0 constant is
  // combined with the other constant parts).
  static constexpr uptr kUnusedBits = 11;
  static_assert(kCompressedAddrBits + kUnusedBits + 9 == 64,
                "unused bits in EventAccessExt");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kAccessExt
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;
  u64 _ : kUnusedBits;
  u64 addr : kCompressedAddrBits;
  u64 pc;
};
static_assert(sizeof(EventAccessExt) == 16, "bad EventAccessExt size");

// Access to a memory range.
struct EventAccessRange {
  static constexpr uptr kSizeLoBits = 13;
  static_assert(kCompressedAddrBits + kSizeLoBits + 7 == 64,
                "unused bits in EventAccessRange");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kAccessRange
  u64 is_read : 1;
  u64 is_free : 1;
  u64 size_lo : kSizeLoBits;
  u64 pc : kCompressedAddrBits;
  u64 addr : kCompressedAddrBits;
  u64 size_hi : 64 - kCompressedAddrBits;
};
static_assert(sizeof(EventAccessRange) == 16, "bad EventAccessRange size");
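
// A small illustrative sketch, not part of the runtime (the helper name
// RestoreRangeSizeExample is hypothetical): the range size is split across
// size_lo/size_hi, with size_lo holding the low kSizeLoBits bits, so a
// replayer would reassemble it roughly like this.
inline uptr RestoreRangeSizeExample(const EventAccessRange &ev) {
  return (static_cast<uptr>(ev.size_hi) << EventAccessRange::kSizeLoBits) |
         static_cast<uptr>(ev.size_lo);
}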

// Mutex lock.
struct EventLock {
  static constexpr uptr kStackIDLoBits = 15;
  static constexpr uptr kStackIDHiBits =
      sizeof(StackID) * kByteBits - kStackIDLoBits;
  static constexpr uptr kUnusedBits = 3;
  static_assert(kCompressedAddrBits + kStackIDLoBits + 5 == 64,
                "unused bits in EventLock");
  static_assert(kCompressedAddrBits + kStackIDHiBits + kUnusedBits == 64,
                "unused bits in EventLock");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kLock or EventType::kRLock
  u64 pc : kCompressedAddrBits;
  u64 stack_lo : kStackIDLoBits;
  u64 stack_hi : sizeof(StackID) * kByteBits - kStackIDLoBits;
  u64 _ : kUnusedBits;
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventLock) == 16, "bad EventLock size");

// Mutex unlock.
struct EventUnlock {
  static constexpr uptr kUnusedBits = 15;
  static_assert(kCompressedAddrBits + kUnusedBits + 5 == 64,
                "unused bits in EventUnlock");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kUnlock
  u64 _ : kUnusedBits;
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventUnlock) == 8, "bad EventUnlock size");

// Time change event.
struct EventTime {
  static constexpr uptr kUnusedBits = 37;
  static_assert(kUnusedBits + sizeof(Sid) * kByteBits + kEpochBits + 5 == 64,
                "unused bits in EventTime");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kTime
  u64 sid : sizeof(Sid) * kByteBits;
  u64 epoch : kEpochBits;
  u64 _ : kUnusedBits;
};
static_assert(sizeof(EventTime) == 8, "bad EventTime size");
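
// Another illustrative sketch, not part of the runtime (the helper name
// RestoreLockStackExample is hypothetical): EventLock stores the lock-site
// StackID split into stack_lo/stack_hi, with stack_lo holding the low
// kStackIDLoBits bits, so replay would reassemble it roughly like this.
inline StackID RestoreLockStackExample(const EventLock &ev) {
  return static_cast<StackID>(
      (static_cast<u64>(ev.stack_hi) << EventLock::kStackIDLoBits) |
      ev.stack_lo);
}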

struct Trace;

struct TraceHeader {
  Trace* trace = nullptr;  // back-pointer to Trace containing this part
  INode trace_parts;       // in Trace::parts
  INode global;            // in Context::trace_part_recycle
};

struct TracePart : TraceHeader {
  // There are a lot of goroutines in Go, so we use smaller parts.
  static constexpr uptr kByteSize = (SANITIZER_GO ? 128 : 256) << 10;
  static constexpr uptr kSize =
      (kByteSize - sizeof(TraceHeader)) / sizeof(Event);
  // TraceAcquire does a fast event pointer overflow check by comparing
  // the pointer into TracePart::events with the kAlignment mask. Since
  // TraceParts are allocated page-aligned, this check detects the end of the
  // array (it also has false positives in the middle, which are filtered out
  // separately). This also requires events to be the last field.
  static constexpr uptr kAlignment = 0xff0;
  Event events[kSize];

  TracePart() {}
};
static_assert(sizeof(TracePart) == TracePart::kByteSize, "bad TracePart size");

struct Trace {
  Mutex mtx;
  IList<TraceHeader, &TraceHeader::trace_parts, TracePart> parts;
  // First node not queued into ctx->trace_part_recycle.
  TracePart* local_head;
  // Final position in the last part for finished threads.
  Event* final_pos = nullptr;
  // Number of trace parts allocated on behalf of this trace specifically.
  // The total number of parts in this trace can be larger if we retake some
  // parts from other traces.
  uptr parts_allocated = 0;

  Trace() : mtx(MutexTypeTrace) {}

  // We need at least 3 parts per thread, because we want to keep at least
  // 2 parts per thread that are not queued into ctx->trace_part_recycle
  // (the current one being filled and one full part that ensures that
  // we always have at least one part worth of previous memory accesses).
  static constexpr uptr kMinParts = 3;

  static constexpr uptr kFinishedThreadLo = 16;
  static constexpr uptr kFinishedThreadHi = 64;
};

}  // namespace __tsan

#endif  // TSAN_TRACE_H