//===-- xray_fdr_log_writer.h ---------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a function call tracing system.
//
//===----------------------------------------------------------------------===//
#ifndef COMPILER_RT_LIB_XRAY_XRAY_FDR_LOG_WRITER_H_
#define COMPILER_RT_LIB_XRAY_XRAY_FDR_LOG_WRITER_H_

#include "xray_buffer_queue.h"
#include "xray_fdr_log_records.h"
#include <functional>
#include <tuple>
#include <type_traits>
#include <utility>

namespace __xray {

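// SerializerImpl copies the raw bytes of each tuple element into a
// caller-provided buffer, advancing the write position by the element's size
// at every step. The two overloads are selected via SFINAE: the first handles
// indices within the tuple, the second terminates the recursion once Index is
// past the last element.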
template <size_t Index> struct SerializerImpl {
  template <class Tuple,
            typename std::enable_if<
                Index < std::tuple_size<
                            typename std::remove_reference<Tuple>::type>::value,
                int>::type = 0>
  static void serializeTo(char *Buffer, Tuple &&T) {
    auto P = reinterpret_cast<const char *>(&std::get<Index>(T));
    constexpr auto Size = sizeof(std::get<Index>(T));
    internal_memcpy(Buffer, P, Size);
    SerializerImpl<Index + 1>::serializeTo(Buffer + Size,
                                           std::forward<Tuple>(T));
  }

  template <class Tuple,
            typename std::enable_if<
                Index >= std::tuple_size<typename std::remove_reference<
                             Tuple>::type>::value,
                int>::type = 0>
  static void serializeTo(char *, Tuple &&) {}
};

using Serializer = SerializerImpl<0>;

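// AggregateSizes computes, at compile time, the sum of sizeof(...) over all
// element types of a std::tuple. It is used by createMetadataRecord below to
// check that a metadata payload fits in the data bytes of a MetadataRecord.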
template <class Tuple, size_t Index> struct AggregateSizesImpl {
  static constexpr size_t value =
      sizeof(typename std::tuple_element<Index, Tuple>::type) +
      AggregateSizesImpl<Tuple, Index - 1>::value;
};

template <class Tuple> struct AggregateSizesImpl<Tuple, 0> {
  static constexpr size_t value =
      sizeof(typename std::tuple_element<0, Tuple>::type);
};

template <class Tuple> struct AggregateSizes {
  static constexpr size_t value =
      AggregateSizesImpl<Tuple, std::tuple_size<Tuple>::value - 1>::value;
};

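// Constructs a MetadataRecord of the given Kind and serializes the provided
// values, in order, into the record's payload. The static_assert guarantees at
// compile time that the packed arguments fit into the payload bytes that
// follow the one-byte record header.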
template <MetadataRecord::RecordKinds Kind, class... DataTypes>
MetadataRecord createMetadataRecord(DataTypes &&... Ds) {
  static_assert(AggregateSizes<std::tuple<DataTypes...>>::value <=
                    sizeof(MetadataRecord) - 1,
                "Metadata payload longer than metadata buffer!");
  MetadataRecord R;
  R.Type = 1; // Metadata record, as opposed to a function record.
  R.RecordKind = static_cast<uint8_t>(Kind);
  Serializer::serializeTo(R.Data,
                          std::make_tuple(std::forward<DataTypes>(Ds)...));
  return R;
}

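// FDRLogWriter appends FDR-mode records to a BufferQueue::Buffer. Every write
// first copies the record bytes into the buffer and then publishes them by
// atomically bumping the buffer's extents, so that readers synchronising on
// the extents never observe partially written records.
//
// A minimal usage sketch (hypothetical caller; BQ, FuncId, and Delta are
// assumed to be provided by the surrounding FDR logging machinery):
//
//   BufferQueue::Buffer Buf;
//   if (BQ.getBuffer(Buf) != BufferQueue::ErrorCode::Ok)
//     return;
//   FDRLogWriter Writer(Buf);
//   Writer.writeFunction(FDRLogWriter::FunctionRecordKind::Enter, FuncId,
//                        Delta);
//   Writer.writeFunction(FDRLogWriter::FunctionRecordKind::Exit, FuncId,
//                        Delta);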
class FDRLogWriter {
  BufferQueue::Buffer &Buffer;
  char *NextRecord = nullptr;

  template <class T> void writeRecord(const T &R) {
    internal_memcpy(NextRecord, reinterpret_cast<const char *>(&R), sizeof(T));
    NextRecord += sizeof(T);
    // We need this atomic fence here to ensure that other threads attempting
    // to read the bytes in the buffer will see the writes committed before
    // the extents are updated.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, sizeof(T), memory_order_acq_rel);
  }

public:
  explicit FDRLogWriter(BufferQueue::Buffer &B, char *P)
      : Buffer(B), NextRecord(P) {
    DCHECK_NE(Buffer.Data, nullptr);
    DCHECK_NE(NextRecord, nullptr);
  }

  explicit FDRLogWriter(BufferQueue::Buffer &B)
      : FDRLogWriter(B, static_cast<char *>(B.Data)) {}

  template <MetadataRecord::RecordKinds Kind, class... Data>
  bool writeMetadata(Data &&... Ds) {
    // TODO: Check boundary conditions:
    // 1) Buffer is full, and cannot handle one metadata record.
    // 2) Buffer queue is finalising.
    writeRecord(createMetadataRecord<Kind>(std::forward<Data>(Ds)...));
    return true;
  }

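  // Writes N already-constructed metadata records in one shot and returns the
  // number of bytes committed to the buffer.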
  template <size_t N> size_t writeMetadataRecords(MetadataRecord (&Recs)[N]) {
    constexpr auto Size = sizeof(MetadataRecord) * N;
    internal_memcpy(NextRecord, reinterpret_cast<const char *>(Recs), Size);
    NextRecord += Size;
    // We need this atomic fence here to ensure that other threads attempting
    // to read the bytes in the buffer will see the writes committed before
    // the extents are updated.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, Size, memory_order_acq_rel);
    return Size;
  }

  enum class FunctionRecordKind : uint8_t {
    Enter = 0x00,
    Exit = 0x01,
    TailExit = 0x02,
    EnterArg = 0x03,
  };

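  // Writes a single FunctionRecord for FuncId with the given TSC delta and
  // publishes it by updating the buffer extents.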
  bool writeFunction(FunctionRecordKind Kind, int32_t FuncId, int32_t Delta) {
    FunctionRecord R;
    R.Type = 0; // Function record.
    R.RecordKind = uint8_t(Kind);
    R.FuncId = FuncId;
    R.TSCDelta = Delta;
    writeRecord(R);
    return true;
  }

  bool writeFunctionWithArg(FunctionRecordKind Kind, int32_t FuncId,
                            int32_t Delta, uint64_t Arg) {
    // We need to write the function record together with its argument into
    // the buffer, and then atomically update the buffer extents. This ensures
    // that any reads synchronised on the buffer extents record will always
    // see the writes that happen before the atomic update.
    FunctionRecord R;
    R.Type = 0; // Function record.
    R.RecordKind = uint8_t(Kind);
    R.FuncId = FuncId;
    R.TSCDelta = Delta;
    MetadataRecord A =
        createMetadataRecord<MetadataRecord::RecordKinds::CallArgument>(Arg);
    NextRecord = reinterpret_cast<char *>(internal_memcpy(
                     NextRecord, reinterpret_cast<char *>(&R), sizeof(R))) +
                 sizeof(R);
    NextRecord = reinterpret_cast<char *>(internal_memcpy(
                     NextRecord, reinterpret_cast<char *>(&A), sizeof(A))) +
                 sizeof(A);
    // We need this atomic fence here to ensure that other threads attempting
    // to read the bytes in the buffer will see the writes committed before
    // the extents are updated.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, sizeof(R) + sizeof(A),
                     memory_order_acq_rel);
    return true;
  }

  bool writeCustomEvent(int32_t Delta, const void *Event, int32_t EventSize) {
    // We write the metadata record and the custom event data into the buffer
    // first, before we atomically update the extents for the buffer. This
    // allows us to ensure that any threads reading the extents of the buffer
    // will only ever see the full metadata and custom event payload accounted
    // for (no partial writes accounted for).
    MetadataRecord R =
        createMetadataRecord<MetadataRecord::RecordKinds::CustomEventMarker>(
            EventSize, Delta);
    NextRecord = reinterpret_cast<char *>(internal_memcpy(
                     NextRecord, reinterpret_cast<char *>(&R), sizeof(R))) +
                 sizeof(R);
    NextRecord = reinterpret_cast<char *>(
                     internal_memcpy(NextRecord, Event, EventSize)) +
                 EventSize;

    // We need this atomic fence here to ensure that other threads attempting
    // to read the bytes in the buffer will see the writes committed before
    // the extents are updated.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, sizeof(R) + EventSize,
                     memory_order_acq_rel);
    return true;
  }

  bool writeTypedEvent(int32_t Delta, uint16_t EventType, const void *Event,
                       int32_t EventSize) {
    // We do something similar when writing out typed events, see
    // writeCustomEvent(...) above for details.
    MetadataRecord R =
        createMetadataRecord<MetadataRecord::RecordKinds::TypedEventMarker>(
            EventSize, Delta, EventType);
    NextRecord = reinterpret_cast<char *>(internal_memcpy(
                     NextRecord, reinterpret_cast<char *>(&R), sizeof(R))) +
                 sizeof(R);
    NextRecord = reinterpret_cast<char *>(
                     internal_memcpy(NextRecord, Event, EventSize)) +
                 EventSize;

    // We need this atomic fence here to ensure that other threads attempting
    // to read the bytes in the buffer will see the writes committed before
    // the extents are updated.
    atomic_thread_fence(memory_order_release);
    // Account for both the metadata record and the event payload written
    // above.
    atomic_fetch_add(Buffer.Extents, sizeof(R) + EventSize,
                     memory_order_acq_rel);
    return true;
  }

  char *getNextRecord() const { return NextRecord; }

  // Rewinds the writer to the start of the buffer and clears the published
  // extents.
  void resetRecord() {
    NextRecord = reinterpret_cast<char *>(Buffer.Data);
    atomic_store(Buffer.Extents, 0, memory_order_release);
  }

  // Takes back the last B bytes written, both from the write cursor and from
  // the published extents.
  void undoWrites(size_t B) {
    DCHECK_GE(NextRecord - B, reinterpret_cast<char *>(Buffer.Data));
    NextRecord -= B;
    atomic_fetch_sub(Buffer.Extents, B, memory_order_acq_rel);
  }

}; // class FDRLogWriter

} // namespace __xray

#endif // COMPILER_RT_LIB_XRAY_XRAY_FDR_LOG_WRITER_H_