xref: /llvm-project/bolt/runtime/instr.cpp (revision ad4e0770ca7ebbc4dd6635b17421819b2393aa33)
1 //===- bolt/runtime/instr.cpp ---------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // BOLT runtime instrumentation library for x86 Linux. Currently, BOLT does
10 // not support linking modules with dependencies on one another into the final
11 // binary (TODO?), which means this library has to be self-contained in a single
12 // module.
13 //
14 // All extern declarations here need to be defined by BOLT itself. Those will be
15 // undefined symbols that BOLT needs to resolve by emitting these symbols with
16 // MCStreamer. Currently, Passes/Instrumentation.cpp is the pass responsible
17 // for defining the symbols here, and these two files are tightly coupled: one
18 // works statically when you run BOLT and the other during program runtime when
19 // you run an instrumented binary. The main goal here is to output an fdata file
20 // (BOLT profile) with the instrumentation counters inserted by the static pass.
21 // Counters for indirect calls are an exception, as we can't know them
22 // statically. These counters are created and managed here. To allow this, we
23 // need a minimal framework for allocating memory dynamically. We provide this
24 // with the BumpPtrAllocator class (not LLVM's, but our own version of it).
25 //
26 // Since this code is intended to be inserted into any executable, we decided to
27 // make it standalone and not depend on any external libraries (e.g. language
28 // support libraries such as glibc or stdc++). To allow this, we provide a few
29 // light implementations of common OS-interacting functionality using direct
30 // syscall wrappers. Our simple allocator doesn't manage deallocations that
31 // fragment the memory space, so it's stack based. This is the minimal framework
32 // provided here to allow processing instrumented counters and writing fdata.
33 //
34 // In the C++ idiom used here, we never use or rely on constructors or
35 // destructors for global objects. That's because those need support from the
36 // linker in initialization/finalization code, and we want to keep our linker
37 // very simple. Similarly, we don't create any global objects that are zero
38 // initialized, since those would need to go in .bss, which our simple linker
39 // also doesn't support (TODO?).
40 //
41 //===----------------------------------------------------------------------===//
42 
43 #if defined (__x86_64__)
44 #include "common.h"
45 
46 // Enables very verbose logging to stderr, useful when debugging
47 //#define ENABLE_DEBUG
48 
49 #ifdef ENABLE_DEBUG
50 #define DEBUG(X)                                                               \
51   { X; }
52 #else
53 #define DEBUG(X)                                                               \
54   {}
55 #endif
56 
57 #pragma GCC visibility push(hidden)
58 
59 extern "C" {
60 
61 #if defined(__APPLE__)
62 extern uint64_t* _bolt_instr_locations_getter();
63 extern uint32_t _bolt_num_counters_getter();
64 
65 extern uint8_t* _bolt_instr_tables_getter();
66 extern uint32_t _bolt_instr_num_funcs_getter();
67 
68 #else
69 
70 // Main counters inserted by instrumentation, incremented during runtime when
71 // points of interest (locations) in the program are reached. These are direct
72 // calls and direct and indirect branches (local ones). There are also counters
73 // for the execution of basic blocks that are spanning tree leaves and need to be
74 // counted in order to infer the execution count of other edges of the CFG.
75 extern uint64_t __bolt_instr_locations[];
76 extern uint32_t __bolt_num_counters;
77 // Descriptions are serialized metadata about binary functions written by BOLT,
78 // so we have a minimal understanding of the program structure. For a
79 // reference on the exact format of this metadata, see *Description structs,
80 // Location, InstrumentedNode and EntryNode.
81 // Number of indirect call site descriptions
82 extern uint32_t __bolt_instr_num_ind_calls;
83 // Number of indirect call target descriptions
84 extern uint32_t __bolt_instr_num_ind_targets;
85 // Number of function descriptions
86 extern uint32_t __bolt_instr_num_funcs;
87 // Time to sleep across dumps (when we write the fdata profile to disk)
88 extern uint32_t __bolt_instr_sleep_time;
89 // Do not clear counters across dumps; rewrite the file with the updated values
90 extern bool __bolt_instr_no_counters_clear;
91 // Wait until all forks of the instrumented process finish
92 extern bool __bolt_instr_wait_forks;
93 // Filename to dump data to
94 extern char __bolt_instr_filename[];
95 // Instrumented binary file path
96 extern char __bolt_instr_binpath[];
97 // If true, append current PID to the fdata filename when creating it so
98 // different invocations of the same program can be differentiated.
99 extern bool __bolt_instr_use_pid;
100 // Functions that will be used to instrument indirect calls. The BOLT static pass
101 // will identify indirect calls and modify them to load the address in these
102 // trampolines and call this address instead. BOLT can't use direct calls to
103 // our handlers because our addresses here are not known at analysis time. We
104 // only support resolving dependencies from this file to the output of BOLT,
105 // *not* the other way around.
106 // TODO: We need better linking support to make that happen.
107 extern void (*__bolt_ind_call_counter_func_pointer)();
108 extern void (*__bolt_ind_tailcall_counter_func_pointer)();
109 // Function pointers to init/fini trampoline routines in the binary, so we can
110 // resume regular execution of these functions that we hooked
111 extern void __bolt_start_trampoline();
112 extern void __bolt_fini_trampoline();
113 
114 #endif
115 }
116 
117 namespace {
118 
119 /// A simple allocator that mmaps a fixed size region and manages this space
120 /// in a stack fashion, meaning you always deallocate the last element that
121 /// was allocated. In practice, we don't need to deallocate individual elements.
122 /// We monotonically increase our usage and then deallocate everything once we
123 /// are done processing something.
124 class BumpPtrAllocator {
125   /// This is written before each allocation and acts as a canary to detect when
126   /// a bug causes our program to cross allocation boundaries.
127   struct EntryMetadata {
128     uint64_t Magic;
129     uint64_t AllocSize;
130   };
131 
132 public:
133   void *allocate(size_t Size) {
134     Lock L(M);
135 
136     if (StackBase == nullptr) {
137       StackBase = reinterpret_cast<uint8_t *>(
138           __mmap(0, MaxSize, PROT_READ | PROT_WRITE,
139                  (Shared ? MAP_SHARED : MAP_PRIVATE) | MAP_ANONYMOUS, -1, 0));
140       assert(StackBase != MAP_FAILED,
141              "BumpPtrAllocator: failed to mmap stack!");
142       StackSize = 0;
143     }
144 
145     Size = alignTo(Size + sizeof(EntryMetadata), 16);
146     uint8_t *AllocAddress = StackBase + StackSize + sizeof(EntryMetadata);
147     auto *M = reinterpret_cast<EntryMetadata *>(StackBase + StackSize);
148     M->Magic = Magic;
149     M->AllocSize = Size;
150     StackSize += Size;
151     assert(StackSize < MaxSize, "allocator ran out of memory");
152     return AllocAddress;
153   }
154 
155 #ifdef DEBUG
156   /// Element-wise deallocation is only used for debugging to catch memory
157   /// bugs by checking magic bytes. Ordinarily, we reset the allocator once
158   /// we are done with it. Reset is done with clear(). There's no need
159   /// to deallocate each element individually.
160   void deallocate(void *Ptr) {
161     Lock L(M);
162     uint8_t MetadataOffset = sizeof(EntryMetadata);
163     auto *M = reinterpret_cast<EntryMetadata *>(
164         reinterpret_cast<uint8_t *>(Ptr) - MetadataOffset);
165     const uint8_t *StackTop = StackBase + StackSize + MetadataOffset;
166     // Validate size
167     if (Ptr != StackTop - M->AllocSize) {
168       // Failed validation, check if it is a pointer returned by operator new []
169       MetadataOffset +=
170           sizeof(uint64_t); // Space for number of elements alloc'ed
171       M = reinterpret_cast<EntryMetadata *>(reinterpret_cast<uint8_t *>(Ptr) -
172                                             MetadataOffset);
173       // Ok, it failed both checks if this assertion fails. Stop the program, we
174       // have a memory bug.
175       assert(Ptr == StackTop - M->AllocSize,
176              "must deallocate the last element alloc'ed");
177     }
178     assert(M->Magic == Magic, "allocator magic is corrupt");
179     StackSize -= M->AllocSize;
180   }
181 #else
182   void deallocate(void *) {}
183 #endif
184 
185   void clear() {
186     Lock L(M);
187     StackSize = 0;
188   }
189 
190   /// Set mmap reservation size (only relevant before first allocation)
191   void setMaxSize(uint64_t Size) { MaxSize = Size; }
192 
193   /// Set mmap reservation privacy (only relevant before first allocation)
194   void setShared(bool S) { Shared = S; }
195 
196   void destroy() {
197     if (StackBase == nullptr)
198       return;
199     __munmap(StackBase, MaxSize);
200   }
201 
202 private:
203   static constexpr uint64_t Magic = 0x1122334455667788ull;
204   uint64_t MaxSize = 0xa00000;
205   uint8_t *StackBase{nullptr};
206   uint64_t StackSize{0};
207   bool Shared{false};
208   Mutex M;
209 };
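
// A minimal usage sketch (hypothetical sizes, assuming sizeof(EntryMetadata)
// == 16): each allocation is preceded by an EntryMetadata header, and the
// header + payload total is rounded up to a multiple of 16. For allocate(24)
// on a fresh allocator the mmapped region would look like:
//
//   StackBase + 0  : EntryMetadata {Magic, AllocSize = 48}
//   StackBase + 16 : pointer returned to the caller (24 usable bytes + padding)
//   StackBase + 48 : where the next allocation's metadata starts
//
// The DEBUG-only deallocate() walks this layout backwards and checks Magic to
// catch writes that crossed an allocation boundary.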
210 
211 /// Used for allocating indirect call instrumentation counters. Initialized by
212 /// __bolt_instr_setup, our initialization routine.
213 BumpPtrAllocator *GlobalAlloc;
214 
215 // Storage for GlobalAlloc, which can be shared if not using
216 // instrumentation-file-append-pid.
217 void *GlobalMetadataStorage;
218 
219 } // anonymous namespace
220 
221 // User-defined placement new operators. We only use those (as opposed to
222 // overriding the regular operator new) so we can keep our allocator on the
223 // stack instead of in a data section (global).
224 void *operator new(size_t Sz, BumpPtrAllocator &A) { return A.allocate(Sz); }
225 void *operator new(size_t Sz, BumpPtrAllocator &A, char C) {
226   auto *Ptr = reinterpret_cast<char *>(A.allocate(Sz));
227   memset(Ptr, C, Sz);
228   return Ptr;
229 }
230 void *operator new[](size_t Sz, BumpPtrAllocator &A) {
231   return A.allocate(Sz);
232 }
233 void *operator new[](size_t Sz, BumpPtrAllocator &A, char C) {
234   auto *Ptr = reinterpret_cast<char *>(A.allocate(Sz));
235   memset(Ptr, C, Sz);
236   return Ptr;
237 }
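
// A minimal usage sketch (names are only illustrative): these overloads act as
// placement-new forms that take the bump allocator, e.g.
//
//   BumpPtrAllocator Alloc;
//   Graph *G = new (Alloc) Graph(Alloc, F, Counters, Ctx); // single object
//   uint64_t *Freqs = new (Alloc, 0) uint64_t[NumEdges];   // zero-filled array
//
// The (Alloc, C) forms memset the allocation with C, which is how the code
// below obtains zero-initialized dynamic arrays.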
238 
239 // Declaration for global allocator to construct it in shared memory if needed.
240 // Needed because we can't #include <new>
241 void *operator new(size_t, void *) noexcept;
242 // Only called during exception unwinding (useless). We must manually dealloc.
243 // C++ language weirdness
244 void operator delete(void *Ptr, BumpPtrAllocator &A) { A.deallocate(Ptr); }
245 
246 namespace {
247 
248 // Disable instrumentation optimizations that sacrifice profile accuracy
249 extern "C" bool __bolt_instr_conservative;
250 
251 /// Basic key-val atom stored in our hash
252 struct SimpleHashTableEntryBase {
253   uint64_t Key;
254   uint64_t Val;
255 };
256 
257 /// This hash table implementation starts by allocating a table of size
258 /// InitialSize. When conflicts happen in this main table, it resolves
259 /// them by chaining a new table of size IncSize. It never reallocates as our
260 /// allocator doesn't support it. Keys are intended to be function pointers.
261 /// There's no clever hash function (it's just x mod size, size being prime).
262 /// I never tuned the coefficients in the modular equation (TODO).
263 /// This is used for indirect calls (each call site has one of these, so it
264 /// should have a small footprint) and for tallying call counts globally for
265 /// each target to check if we missed the origin of some calls (this one is a
266 /// large instantiation of this template, since it is global for all call sites)
267 template <typename T = SimpleHashTableEntryBase, uint32_t InitialSize = 7,
268           uint32_t IncSize = 7>
269 class SimpleHashTable {
270 public:
271   using MapEntry = T;
272 
273   /// Increment by 1 the value of \p Key. If it is not in this table, it will be
274   /// added to the table and its value set to 1.
275   void incrementVal(uint64_t Key, BumpPtrAllocator &Alloc) {
276     if (!__bolt_instr_conservative) {
277       TryLock L(M);
278       if (!L.isLocked())
279         return;
280       auto &E = getOrAllocEntry(Key, Alloc);
281       ++E.Val;
282       return;
283     }
284     Lock L(M);
285     auto &E = getOrAllocEntry(Key, Alloc);
286     ++E.Val;
287   }
288 
289   /// Basic member-access interface. Here we pass the allocator explicitly to
290   /// avoid storing a pointer to it as part of this table (remember there is one
291   /// hash table for each indirect call site, so we want to minimize our footprint).
292   MapEntry &get(uint64_t Key, BumpPtrAllocator &Alloc) {
293     if (!__bolt_instr_conservative) {
294       TryLock L(M);
295       if (!L.isLocked())
296         return NoEntry;
297       return getOrAllocEntry(Key, Alloc);
298     }
299     Lock L(M);
300     return getOrAllocEntry(Key, Alloc);
301   }
302 
303   /// Traverses all elements in the table
304   template <typename... Args>
305   void forEachElement(void (*Callback)(MapEntry &, Args...), Args... args) {
306     Lock L(M);
307     if (!TableRoot)
308       return;
309     return forEachElement(Callback, InitialSize, TableRoot, args...);
310   }
311 
312   void resetCounters();
313 
314 private:
315   constexpr static uint64_t VacantMarker = 0;
316   constexpr static uint64_t FollowUpTableMarker = 0x8000000000000000ull;
317 
318   MapEntry *TableRoot{nullptr};
319   MapEntry NoEntry;
320   Mutex M;
321 
322   template <typename... Args>
323   void forEachElement(void (*Callback)(MapEntry &, Args...),
324                       uint32_t NumEntries, MapEntry *Entries, Args... args) {
325     for (uint32_t I = 0; I < NumEntries; ++I) {
326       MapEntry &Entry = Entries[I];
327       if (Entry.Key == VacantMarker)
328         continue;
329       if (Entry.Key & FollowUpTableMarker) {
330         MapEntry *Next =
331             reinterpret_cast<MapEntry *>(Entry.Key & ~FollowUpTableMarker);
332         assert(Next != Entries, "Circular reference!");
333         forEachElement(Callback, IncSize, Next, args...);
334         continue;
335       }
336       Callback(Entry, args...);
337     }
338   }
339 
340   MapEntry &firstAllocation(uint64_t Key, BumpPtrAllocator &Alloc) {
341     TableRoot = new (Alloc, 0) MapEntry[InitialSize];
342     MapEntry &Entry = TableRoot[Key % InitialSize];
343     Entry.Key = Key;
344     return Entry;
345   }
346 
347   MapEntry &getEntry(MapEntry *Entries, uint64_t Key, uint64_t Selector,
348                      BumpPtrAllocator &Alloc, int CurLevel) {
349     const uint32_t NumEntries = CurLevel == 0 ? InitialSize : IncSize;
350     uint64_t Remainder = Selector / NumEntries;
351     Selector = Selector % NumEntries;
352     MapEntry &Entry = Entries[Selector];
353 
354     // A hit
355     if (Entry.Key == Key) {
356       return Entry;
357     }
358 
359     // Vacant - add new entry
360     if (Entry.Key == VacantMarker) {
361       Entry.Key = Key;
362       return Entry;
363     }
364 
365     // Defer to the next level
366     if (Entry.Key & FollowUpTableMarker) {
367       return getEntry(
368           reinterpret_cast<MapEntry *>(Entry.Key & ~FollowUpTableMarker),
369           Key, Remainder, Alloc, CurLevel + 1);
370     }
371 
372     // Conflict - create the next level
373     MapEntry *NextLevelTbl = new (Alloc, 0) MapEntry[IncSize];
374     uint64_t CurEntrySelector = Entry.Key / InitialSize;
375     for (int I = 0; I < CurLevel; ++I)
376       CurEntrySelector /= IncSize;
377     CurEntrySelector = CurEntrySelector % IncSize;
378     NextLevelTbl[CurEntrySelector] = Entry;
379     Entry.Key = reinterpret_cast<uint64_t>(NextLevelTbl) | FollowUpTableMarker;
380     assert((NextLevelTbl[CurEntrySelector].Key & ~FollowUpTableMarker) !=
381                uint64_t(Entries),
382            "circular reference created!\n");
383     return getEntry(NextLevelTbl, Key, Remainder, Alloc, CurLevel + 1);
384   }
385 
386   MapEntry &getOrAllocEntry(uint64_t Key, BumpPtrAllocator &Alloc) {
387     if (TableRoot) {
388       MapEntry &E = getEntry(TableRoot, Key, Key, Alloc, 0);
389       assert(!(E.Key & FollowUpTableMarker), "Invalid entry!");
390       return E;
391     }
392     return firstAllocation(Key, Alloc);
393   }
394 };
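
// A minimal lookup sketch (hypothetical keys), using the default
// InitialSize == IncSize == 7:
//
//   incrementVal(30, Alloc): 30 % 7 == 2, so key 30 claims root slot 2.
//   incrementVal(16, Alloc): 16 % 7 == 2 collides with key 30, so a new 7-entry
//     table is chained from root slot 2; key 30 moves to slot (30 / 7) % 7 == 4
//     of that table and key 16 takes slot (16 / 7) % 7 == 2.
//
// Root slot 2 then holds the chained table's address tagged with
// FollowUpTableMarker, which is why keys are assumed to never set bit 63.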
395 
396 template <typename T> void resetIndCallCounter(T &Entry) {
397   Entry.Val = 0;
398 }
399 
400 template <typename T, uint32_t X, uint32_t Y>
401 void SimpleHashTable<T, X, Y>::resetCounters() {
402   forEachElement(resetIndCallCounter);
403 }
404 
405 /// Represents a hash table mapping a function target address to its counter.
406 using IndirectCallHashTable = SimpleHashTable<>;
407 
408 /// Initialize with 1 instead of 0 so this doesn't go into .bss. This is the
409 /// global array of all hash tables storing indirect call destinations happening
410 /// during runtime, one table per call site.
411 IndirectCallHashTable *GlobalIndCallCounters{
412     reinterpret_cast<IndirectCallHashTable *>(1)};
413 
414 /// Don't allow reentrancy in the fdata writing phase - only one thread writes
415 /// it
416 Mutex *GlobalWriteProfileMutex{reinterpret_cast<Mutex *>(1)};
417 
418 /// Stores the number of calls in addition to the target address (Key) and the
419 /// frequency as perceived by the basic block counter (Val).
420 struct CallFlowEntryBase : public SimpleHashTableEntryBase {
421   uint64_t Calls;
422 };
423 
424 using CallFlowHashTableBase = SimpleHashTable<CallFlowEntryBase, 11939, 233>;
425 
426 /// This is a large table indexing all possible call targets (indirect and
427 /// direct ones). The goal is to find mismatches between number of calls (for
428 /// those calls we were able to track) and the entry basic block counter of the
429 /// callee. In most cases, these two should be equal. If not, there are two
430 /// possible scenarios here:
431 ///
432 ///  * Entry BB has higher frequency than all known calls to this function.
433 ///    In this case, we have dynamic library code or any uninstrumented code
434 ///    calling this function. We will write the profile for these untracked
435 ///    calls as having source "0 [unknown] 0" in the fdata file.
436 ///
437 ///  * The number of known calls is higher than the frequency of the entry BB.
438 ///    This only happens when there is no counter for the entry BB / the callee
439 ///    function is not simple (in BOLT terms). We don't do anything special
440 ///    here and just ignore those (we still report all calls to the non-simple
441 ///    function, though).
442 ///
443 class CallFlowHashTable : public CallFlowHashTableBase {
444 public:
445   CallFlowHashTable(BumpPtrAllocator &Alloc) : Alloc(Alloc) {}
446 
447   MapEntry &get(uint64_t Key) { return CallFlowHashTableBase::get(Key, Alloc); }
448 
449 private:
450   // Unlike the hash table for indirect call targets, we do store the
451   // allocator here, since there is only one call flow hash table and the space
452   // overhead is negligible.
453   BumpPtrAllocator &Alloc;
454 };
455 
456 ///
457 /// Description metadata emitted by BOLT to describe the program - refer to
458 /// Passes/Instrumentation.cpp - Instrumentation::emitTablesAsELFNote()
459 ///
460 struct Location {
461   uint32_t FunctionName;
462   uint32_t Offset;
463 };
464 
465 struct CallDescription {
466   Location From;
467   uint32_t FromNode;
468   Location To;
469   uint32_t Counter;
470   uint64_t TargetAddress;
471 };
472 
473 using IndCallDescription = Location;
474 
475 struct IndCallTargetDescription {
476   Location Loc;
477   uint64_t Address;
478 };
479 
480 struct EdgeDescription {
481   Location From;
482   uint32_t FromNode;
483   Location To;
484   uint32_t ToNode;
485   uint32_t Counter;
486 };
487 
488 struct InstrumentedNode {
489   uint32_t Node;
490   uint32_t Counter;
491 };
492 
493 struct EntryNode {
494   uint64_t Node;
495   uint64_t Address;
496 };
497 
498 struct FunctionDescription {
499   uint32_t NumLeafNodes;
500   const InstrumentedNode *LeafNodes;
501   uint32_t NumEdges;
502   const EdgeDescription *Edges;
503   uint32_t NumCalls;
504   const CallDescription *Calls;
505   uint32_t NumEntryNodes;
506   const EntryNode *EntryNodes;
507 
508   /// Constructor will parse the serialized function metadata written by BOLT
509   FunctionDescription(const uint8_t *FuncDesc);
510 
511   uint64_t getSize() const {
512     return 16 + NumLeafNodes * sizeof(InstrumentedNode) +
513            NumEdges * sizeof(EdgeDescription) +
514            NumCalls * sizeof(CallDescription) +
515            NumEntryNodes * sizeof(EntryNode);
516   }
517 };
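
// A sketch of the serialized layout consumed by getSize() and by the
// FunctionDescription constructor defined further below:
//
//   [uint32_t NumLeafNodes ][InstrumentedNode x NumLeafNodes ]
//   [uint32_t NumEdges     ][EdgeDescription  x NumEdges     ]
//   [uint32_t NumCalls     ][CallDescription  x NumCalls     ]
//   [uint32_t NumEntryNodes][EntryNode        x NumEntryNodes]
//
// The four 32-bit counts are what the constant 16 in getSize() accounts for.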
518 
519 /// The context is created when the fdata profile needs to be written to disk
520 /// and we need to interpret our runtime counters. It contains pointers to the
521 /// mmapped binary (only the BOLT-written metadata section). Deserialization
522 /// should be straightforward as most data is POD or an array of POD elements.
523 /// This metadata is used to reconstruct function CFGs.
524 struct ProfileWriterContext {
525   IndCallDescription *IndCallDescriptions;
526   IndCallTargetDescription *IndCallTargets;
527   uint8_t *FuncDescriptions;
528   char *Strings;  // String table with function names used in this binary
529   int FileDesc;   // File descriptor for the file on disk backing this
530                   // information in memory via mmap
531   void *MMapPtr;  // The mmap ptr
532   int MMapSize;   // The mmap size
533 
534   /// Hash table storing all possible call destinations to detect untracked
535   /// calls and correctly report them as [unknown] in output fdata.
536   CallFlowHashTable *CallFlowTable;
537 
538   /// Look up the sorted indirect call target vector to fetch function name and
539   /// offset for an arbitrary function pointer.
540   const IndCallTargetDescription *lookupIndCallTarget(uint64_t Target) const;
541 };
542 
543 /// Performs a string comparison and returns zero if Str1 matches Str2. Compares
544 /// at most Size characters.
545 int compareStr(const char *Str1, const char *Str2, int Size) {
546   while (*Str1 == *Str2) {
547     if (*Str1 == '\0' || --Size == 0)
548       return 0;
549     ++Str1;
550     ++Str2;
551   }
552   return 1;
553 }
554 
555 /// Output Location to the fdata file
556 char *serializeLoc(const ProfileWriterContext &Ctx, char *OutBuf,
557                    const Location Loc, uint32_t BufSize) {
558   // fdata location format: Type Name Offset
559   // Type 1 - regular symbol
560   OutBuf = strCopy(OutBuf, "1 ");
561   const char *Str = Ctx.Strings + Loc.FunctionName;
562   uint32_t Size = 25;
563   while (*Str) {
564     *OutBuf++ = *Str++;
565     if (++Size >= BufSize)
566       break;
567   }
568   assert(!*Str, "buffer overflow, function name too large");
569   *OutBuf++ = ' ';
570   OutBuf = intToStr(OutBuf, Loc.Offset, 16);
571   *OutBuf++ = ' ';
572   return OutBuf;
573 }
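
// A minimal output sketch (hypothetical names and offsets): for a Location
// whose FunctionName resolves to "frobnicate" and whose Offset is 0x2a,
// serializeLoc() appends the text
//
//   "1 frobnicate 2a "
//
// Callers below chain two such locations followed by "0 <count>" to build one
// fdata line, e.g. "1 foo a 1 bar 0 0 1337".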
574 
575 /// Read and deserialize a function description written by BOLT. \p FuncDesc
576 /// points at the beginning of the function metadata structure in the file.
577 /// See Instrumentation::emitTablesAsELFNote()
578 FunctionDescription::FunctionDescription(const uint8_t *FuncDesc) {
579   NumLeafNodes = *reinterpret_cast<const uint32_t *>(FuncDesc);
580   DEBUG(reportNumber("NumLeafNodes = ", NumLeafNodes, 10));
581   LeafNodes = reinterpret_cast<const InstrumentedNode *>(FuncDesc + 4);
582 
583   NumEdges = *reinterpret_cast<const uint32_t *>(
584       FuncDesc + 4 + NumLeafNodes * sizeof(InstrumentedNode));
585   DEBUG(reportNumber("NumEdges = ", NumEdges, 10));
586   Edges = reinterpret_cast<const EdgeDescription *>(
587       FuncDesc + 8 + NumLeafNodes * sizeof(InstrumentedNode));
588 
589   NumCalls = *reinterpret_cast<const uint32_t *>(
590       FuncDesc + 8 + NumLeafNodes * sizeof(InstrumentedNode) +
591       NumEdges * sizeof(EdgeDescription));
592   DEBUG(reportNumber("NumCalls = ", NumCalls, 10));
593   Calls = reinterpret_cast<const CallDescription *>(
594       FuncDesc + 12 + NumLeafNodes * sizeof(InstrumentedNode) +
595       NumEdges * sizeof(EdgeDescription));
596   NumEntryNodes = *reinterpret_cast<const uint32_t *>(
597       FuncDesc + 12 + NumLeafNodes * sizeof(InstrumentedNode) +
598       NumEdges * sizeof(EdgeDescription) + NumCalls * sizeof(CallDescription));
599   DEBUG(reportNumber("NumEntryNodes = ", NumEntryNodes, 10));
600   EntryNodes = reinterpret_cast<const EntryNode *>(
601       FuncDesc + 16 + NumLeafNodes * sizeof(InstrumentedNode) +
602       NumEdges * sizeof(EdgeDescription) + NumCalls * sizeof(CallDescription));
603 }
604 
605 /// Read and mmap descriptions written by BOLT from the executable's notes
606 /// section
607 #if defined(HAVE_ELF_H) and !defined(__APPLE__)
608 
609 void *__attribute__((noinline)) __get_pc() {
610   return __builtin_extract_return_addr(__builtin_return_address(0));
611 }
612 
613 /// Parse a string with an address range into the hex pair <StartAddress, EndAddress>
614 bool parseAddressRange(const char *Str, uint64_t &StartAddress,
615                        uint64_t &EndAddress) {
616   if (!Str)
617     return false;
618   // Parsed string format: <hex1>-<hex2>
619   StartAddress = hexToLong(Str, '-');
620   while (*Str && *Str != '-')
621     ++Str;
622   if (!*Str)
623     return false;
624   ++Str; // swallow '-'
625   EndAddress = hexToLong(Str);
626   return true;
627 }
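
// A minimal usage sketch (hypothetical addresses): map_files entries are named
// after the mapping's address range, so for a directory entry named
// "5632c5e4b000-5632c5e4c000", parseAddressRange() sets
// StartAddress = 0x5632c5e4b000 and EndAddress = 0x5632c5e4c000.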
628 
629 /// Get the full path to the real binary by taking the current virtual address
630 /// and searching /proc/self/map_files for the link whose address range
631 /// contains it
632 static char *getBinaryPath() {
633   const uint32_t BufSize = 1024;
634   const uint32_t NameMax = 4096;
635   const char DirPath[] = "/proc/self/map_files/";
636   static char TargetPath[NameMax] = {};
637   char Buf[BufSize];
638 
639   if (__bolt_instr_binpath[0] != '\0')
640     return __bolt_instr_binpath;
641 
642   if (TargetPath[0] != '\0')
643     return TargetPath;
644 
645   unsigned long CurAddr = (unsigned long)__get_pc();
646   uint64_t FDdir = __open(DirPath,
647                           /*flags=*/0 /*O_RDONLY*/,
648                           /*mode=*/0666);
649   assert(static_cast<int64_t>(FDdir) >= 0,
650          "failed to open /proc/self/map_files");
651 
652   while (long Nread = __getdents(FDdir, (struct dirent *)Buf, BufSize)) {
653     assert(static_cast<int64_t>(Nread) != -1, "failed to get folder entries");
654 
655     struct dirent *d;
656     for (long Bpos = 0; Bpos < Nread; Bpos += d->d_reclen) {
657       d = (struct dirent *)(Buf + Bpos);
658 
659       uint64_t StartAddress, EndAddress;
660       if (!parseAddressRange(d->d_name, StartAddress, EndAddress))
661         continue;
662       if (CurAddr < StartAddress || CurAddr > EndAddress)
663         continue;
664       char FindBuf[NameMax];
665       char *C = strCopy(FindBuf, DirPath, NameMax);
666       C = strCopy(C, d->d_name, NameMax - (C - FindBuf));
667       *C = '\0';
668       uint32_t Ret = __readlink(FindBuf, TargetPath, sizeof(TargetPath));
669       assert(Ret != -1 && Ret != BufSize, "readlink error");
670       TargetPath[Ret] = '\0';
671       return TargetPath;
672     }
673   }
674   return nullptr;
675 }
676 
677 ProfileWriterContext readDescriptions() {
678   ProfileWriterContext Result;
679   char *BinPath = getBinaryPath();
680   assert(BinPath && BinPath[0] != '\0', "failed to find binary path");
681 
682   uint64_t FD = __open(BinPath,
683                        /*flags=*/0 /*O_RDONLY*/,
684                        /*mode=*/0666);
685   assert(static_cast<int64_t>(FD) >= 0, "failed to open binary path");
686 
687   Result.FileDesc = FD;
688 
689   // mmap our binary to memory
690   uint64_t Size = __lseek(FD, 0, 2 /*SEEK_END*/);
691   uint8_t *BinContents = reinterpret_cast<uint8_t *>(
692       __mmap(0, Size, PROT_READ, MAP_PRIVATE, FD, 0));
693   assert(BinContents != MAP_FAILED, "readDescriptions: Failed to mmap self!");
694   Result.MMapPtr = BinContents;
695   Result.MMapSize = Size;
696   Elf64_Ehdr *Hdr = reinterpret_cast<Elf64_Ehdr *>(BinContents);
697   Elf64_Shdr *Shdr = reinterpret_cast<Elf64_Shdr *>(BinContents + Hdr->e_shoff);
698   Elf64_Shdr *StringTblHeader = reinterpret_cast<Elf64_Shdr *>(
699       BinContents + Hdr->e_shoff + Hdr->e_shstrndx * Hdr->e_shentsize);
700 
701   // Find .bolt.instr.tables with the data we need and set pointers to it
702   for (int I = 0; I < Hdr->e_shnum; ++I) {
703     char *SecName = reinterpret_cast<char *>(
704         BinContents + StringTblHeader->sh_offset + Shdr->sh_name);
705     if (compareStr(SecName, ".bolt.instr.tables", 64) != 0) {
706       Shdr = reinterpret_cast<Elf64_Shdr *>(BinContents + Hdr->e_shoff +
707                                             (I + 1) * Hdr->e_shentsize);
708       continue;
709     }
710     // Actual contents of the ELF note start after offset 20 decimal:
711     // Offset 0: Producer name size (4 bytes)
712     // Offset 4: Contents size (4 bytes)
713     // Offset 8: Note type (4 bytes)
714     // Offset 12: Producer name (BOLT\0) (5 bytes + align to 4-byte boundary)
715     // Offset 20: Contents
716     uint32_t IndCallDescSize =
717         *reinterpret_cast<uint32_t *>(BinContents + Shdr->sh_offset + 20);
718     uint32_t IndCallTargetDescSize = *reinterpret_cast<uint32_t *>(
719         BinContents + Shdr->sh_offset + 24 + IndCallDescSize);
720     uint32_t FuncDescSize =
721         *reinterpret_cast<uint32_t *>(BinContents + Shdr->sh_offset + 28 +
722                                       IndCallDescSize + IndCallTargetDescSize);
723     Result.IndCallDescriptions = reinterpret_cast<IndCallDescription *>(
724         BinContents + Shdr->sh_offset + 24);
725     Result.IndCallTargets = reinterpret_cast<IndCallTargetDescription *>(
726         BinContents + Shdr->sh_offset + 28 + IndCallDescSize);
727     Result.FuncDescriptions = BinContents + Shdr->sh_offset + 32 +
728                               IndCallDescSize + IndCallTargetDescSize;
729     Result.Strings = reinterpret_cast<char *>(
730         BinContents + Shdr->sh_offset + 32 + IndCallDescSize +
731         IndCallTargetDescSize + FuncDescSize);
732     return Result;
733   }
734   const char ErrMsg[] =
735       "BOLT instrumentation runtime error: could not find section "
736       ".bolt.instr.tables\n";
737   reportError(ErrMsg, sizeof(ErrMsg));
738   return Result;
739 }
740 
741 #else
742 
743 ProfileWriterContext readDescriptions() {
744   ProfileWriterContext Result;
745   uint8_t *Tables = _bolt_instr_tables_getter();
746   uint32_t IndCallDescSize = *reinterpret_cast<uint32_t *>(Tables);
747   uint32_t IndCallTargetDescSize =
748       *reinterpret_cast<uint32_t *>(Tables + 4 + IndCallDescSize);
749   uint32_t FuncDescSize = *reinterpret_cast<uint32_t *>(
750       Tables + 8 + IndCallDescSize + IndCallTargetDescSize);
751   Result.IndCallDescriptions =
752       reinterpret_cast<IndCallDescription *>(Tables + 4);
753   Result.IndCallTargets = reinterpret_cast<IndCallTargetDescription *>(
754       Tables + 8 + IndCallDescSize);
755   Result.FuncDescriptions =
756       Tables + 12 + IndCallDescSize + IndCallTargetDescSize;
757   Result.Strings = reinterpret_cast<char *>(
758       Tables + 12 + IndCallDescSize + IndCallTargetDescSize + FuncDescSize);
759   return Result;
760 }
761 
762 #endif
763 
764 #if !defined(__APPLE__)
765 /// Debug helper printing overall metadata global numbers to check they are sane
766 void printStats(const ProfileWriterContext &Ctx) {
767   char StatMsg[BufSize];
768   char *StatPtr = StatMsg;
769   StatPtr =
770       strCopy(StatPtr,
771               "\nBOLT INSTRUMENTATION RUNTIME STATISTICS\n\nIndCallDescSize: ");
772   StatPtr = intToStr(StatPtr,
773                      Ctx.FuncDescriptions -
774                          reinterpret_cast<uint8_t *>(Ctx.IndCallDescriptions),
775                      10);
776   StatPtr = strCopy(StatPtr, "\nFuncDescSize: ");
777   StatPtr = intToStr(
778       StatPtr,
779       reinterpret_cast<uint8_t *>(Ctx.Strings) - Ctx.FuncDescriptions, 10);
780   StatPtr = strCopy(StatPtr, "\n__bolt_instr_num_ind_calls: ");
781   StatPtr = intToStr(StatPtr, __bolt_instr_num_ind_calls, 10);
782   StatPtr = strCopy(StatPtr, "\n__bolt_instr_num_funcs: ");
783   StatPtr = intToStr(StatPtr, __bolt_instr_num_funcs, 10);
784   StatPtr = strCopy(StatPtr, "\n");
785   __write(2, StatMsg, StatPtr - StatMsg);
786 }
787 #endif
788 
789 
790 /// This is part of a simple CFG representation in memory, where we store
791 /// a dynamically sized array of input and output edges per node, and store
792 /// a dynamically sized array of nodes per graph. We also store the spanning
793 /// tree edges for that CFG in a separate array of nodes in
794 /// \p SpanningTreeNodes, while the regular nodes live in \p CFGNodes.
795 struct Edge {
796   uint32_t Node; // Index into the nodes array of this edge's destination
797   uint32_t ID;   // Edge index in an array comprising all edges of the graph
798 };
799 
800 /// A regular graph node or a spanning tree node
801 struct Node {
802   uint32_t NumInEdges{0};  // Input edge count used to size InEdge
803   uint32_t NumOutEdges{0}; // Output edge count used to size OutEdges
804   Edge *InEdges{nullptr};  // Created and managed by \p Graph
805   Edge *OutEdges{nullptr}; // ditto
806 };
807 
808 /// Main class for CFG representation in memory. Manages object creation and
809 /// destruction, populates an array of CFG nodes as well as corresponding
810 /// spanning tree nodes.
811 struct Graph {
812   uint32_t NumNodes;
813   Node *CFGNodes;
814   Node *SpanningTreeNodes;
815   uint64_t *EdgeFreqs;
816   uint64_t *CallFreqs;
817   BumpPtrAllocator &Alloc;
818   const FunctionDescription &D;
819 
820   /// Reads a list of edges from function description \p D and builds
821   /// the graph from it. Allocates several internal dynamic structures that are
822   /// later destroyed by ~Graph() and uses \p Alloc. D.LeafNodes contains all
823   /// spanning tree leaf node descriptions (their counters). They are the seed
824   /// used to compute the rest of the missing edge counts in a bottom-up
825   /// traversal of the spanning tree.
826   Graph(BumpPtrAllocator &Alloc, const FunctionDescription &D,
827         const uint64_t *Counters, ProfileWriterContext &Ctx);
828   ~Graph();
829   void dump() const;
830 
831 private:
832   void computeEdgeFrequencies(const uint64_t *Counters,
833                               ProfileWriterContext &Ctx);
834   void dumpEdgeFreqs() const;
835 };
836 
837 Graph::Graph(BumpPtrAllocator &Alloc, const FunctionDescription &D,
838              const uint64_t *Counters, ProfileWriterContext &Ctx)
839     : Alloc(Alloc), D(D) {
840   DEBUG(reportNumber("G = 0x", (uint64_t)this, 16));
841   // First pass to determine number of nodes
842   int32_t MaxNodes = -1;
843   CallFreqs = nullptr;
844   EdgeFreqs = nullptr;
845   for (int I = 0; I < D.NumEdges; ++I) {
846     if (static_cast<int32_t>(D.Edges[I].FromNode) > MaxNodes)
847       MaxNodes = D.Edges[I].FromNode;
848     if (static_cast<int32_t>(D.Edges[I].ToNode) > MaxNodes)
849       MaxNodes = D.Edges[I].ToNode;
850   }
851 
852   for (int I = 0; I < D.NumLeafNodes; ++I)
853     if (static_cast<int32_t>(D.LeafNodes[I].Node) > MaxNodes)
854       MaxNodes = D.LeafNodes[I].Node;
855 
856   for (int I = 0; I < D.NumCalls; ++I)
857     if (static_cast<int32_t>(D.Calls[I].FromNode) > MaxNodes)
858       MaxNodes = D.Calls[I].FromNode;
859 
860   // No nodes? Nothing to do
861   if (MaxNodes < 0) {
862     DEBUG(report("No nodes!\n"));
863     CFGNodes = nullptr;
864     SpanningTreeNodes = nullptr;
865     NumNodes = 0;
866     return;
867   }
868   ++MaxNodes;
869   DEBUG(reportNumber("NumNodes = ", MaxNodes, 10));
870   NumNodes = static_cast<uint32_t>(MaxNodes);
871 
872   // Initial allocations
873   CFGNodes = new (Alloc) Node[MaxNodes];
874 
875   DEBUG(reportNumber("G->CFGNodes = 0x", (uint64_t)CFGNodes, 16));
876   SpanningTreeNodes = new (Alloc) Node[MaxNodes];
877   DEBUG(reportNumber("G->SpanningTreeNodes = 0x",
878                      (uint64_t)SpanningTreeNodes, 16));
879 
880   // Figure out how much to allocate to each vector (in/out edge sets)
881   for (int I = 0; I < D.NumEdges; ++I) {
882     CFGNodes[D.Edges[I].FromNode].NumOutEdges++;
883     CFGNodes[D.Edges[I].ToNode].NumInEdges++;
884     if (D.Edges[I].Counter != 0xffffffff)
885       continue;
886 
887     SpanningTreeNodes[D.Edges[I].FromNode].NumOutEdges++;
888     SpanningTreeNodes[D.Edges[I].ToNode].NumInEdges++;
889   }
890 
891   // Allocate in/out edge sets
892   for (int I = 0; I < MaxNodes; ++I) {
893     if (CFGNodes[I].NumInEdges > 0)
894       CFGNodes[I].InEdges = new (Alloc) Edge[CFGNodes[I].NumInEdges];
895     if (CFGNodes[I].NumOutEdges > 0)
896       CFGNodes[I].OutEdges = new (Alloc) Edge[CFGNodes[I].NumOutEdges];
897     if (SpanningTreeNodes[I].NumInEdges > 0)
898       SpanningTreeNodes[I].InEdges =
899           new (Alloc) Edge[SpanningTreeNodes[I].NumInEdges];
900     if (SpanningTreeNodes[I].NumOutEdges > 0)
901       SpanningTreeNodes[I].OutEdges =
902           new (Alloc) Edge[SpanningTreeNodes[I].NumOutEdges];
903     CFGNodes[I].NumInEdges = 0;
904     CFGNodes[I].NumOutEdges = 0;
905     SpanningTreeNodes[I].NumInEdges = 0;
906     SpanningTreeNodes[I].NumOutEdges = 0;
907   }
908 
909   // Fill in/out edge sets
910   for (int I = 0; I < D.NumEdges; ++I) {
911     const uint32_t Src = D.Edges[I].FromNode;
912     const uint32_t Dst = D.Edges[I].ToNode;
913     Edge *E = &CFGNodes[Src].OutEdges[CFGNodes[Src].NumOutEdges++];
914     E->Node = Dst;
915     E->ID = I;
916 
917     E = &CFGNodes[Dst].InEdges[CFGNodes[Dst].NumInEdges++];
918     E->Node = Src;
919     E->ID = I;
920 
921     if (D.Edges[I].Counter != 0xffffffff)
922       continue;
923 
924     E = &SpanningTreeNodes[Src]
925              .OutEdges[SpanningTreeNodes[Src].NumOutEdges++];
926     E->Node = Dst;
927     E->ID = I;
928 
929     E = &SpanningTreeNodes[Dst]
930              .InEdges[SpanningTreeNodes[Dst].NumInEdges++];
931     E->Node = Src;
932     E->ID = I;
933   }
934 
935   computeEdgeFrequencies(Counters, Ctx);
936 }
937 
938 Graph::~Graph() {
939   if (CallFreqs)
940     Alloc.deallocate(CallFreqs);
941   if (EdgeFreqs)
942     Alloc.deallocate(EdgeFreqs);
943   for (int I = NumNodes - 1; I >= 0; --I) {
944     if (SpanningTreeNodes[I].OutEdges)
945       Alloc.deallocate(SpanningTreeNodes[I].OutEdges);
946     if (SpanningTreeNodes[I].InEdges)
947       Alloc.deallocate(SpanningTreeNodes[I].InEdges);
948     if (CFGNodes[I].OutEdges)
949       Alloc.deallocate(CFGNodes[I].OutEdges);
950     if (CFGNodes[I].InEdges)
951       Alloc.deallocate(CFGNodes[I].InEdges);
952   }
953   if (SpanningTreeNodes)
954     Alloc.deallocate(SpanningTreeNodes);
955   if (CFGNodes)
956     Alloc.deallocate(CFGNodes);
957 }
958 
959 void Graph::dump() const {
960   reportNumber("Dumping graph with number of nodes: ", NumNodes, 10);
961   report("  Full graph:\n");
962   for (int I = 0; I < NumNodes; ++I) {
963     const Node *N = &CFGNodes[I];
964     reportNumber("    Node #", I, 10);
965     reportNumber("      InEdges total ", N->NumInEdges, 10);
966     for (int J = 0; J < N->NumInEdges; ++J)
967       reportNumber("        ", N->InEdges[J].Node, 10);
968     reportNumber("      OutEdges total ", N->NumOutEdges, 10);
969     for (int J = 0; J < N->NumOutEdges; ++J)
970       reportNumber("        ", N->OutEdges[J].Node, 10);
971     report("\n");
972   }
973   report("  Spanning tree:\n");
974   for (int I = 0; I < NumNodes; ++I) {
975     const Node *N = &SpanningTreeNodes[I];
976     reportNumber("    Node #", I, 10);
977     reportNumber("      InEdges total ", N->NumInEdges, 10);
978     for (int J = 0; J < N->NumInEdges; ++J)
979       reportNumber("        ", N->InEdges[J].Node, 10);
980     reportNumber("      OutEdges total ", N->NumOutEdges, 10);
981     for (int J = 0; J < N->NumOutEdges; ++J)
982       reportNumber("        ", N->OutEdges[J].Node, 10);
983     report("\n");
984   }
985 }
986 
987 void Graph::dumpEdgeFreqs() const {
988   reportNumber(
989       "Dumping edge frequencies for graph with num edges: ", D.NumEdges, 10);
990   for (int I = 0; I < D.NumEdges; ++I) {
991     reportNumber("* Src: ", D.Edges[I].FromNode, 10);
992     reportNumber("  Dst: ", D.Edges[I].ToNode, 10);
993     reportNumber("    Cnt: ", EdgeFreqs[I], 10);
994   }
995 }
996 
997 /// Auxiliary map structure for fast lookups of which calls map to each node of
998 /// the function CFG
999 struct NodeToCallsMap {
1000   struct MapEntry {
1001     uint32_t NumCalls;
1002     uint32_t *Calls;
1003   };
1004   MapEntry *Entries;
1005   BumpPtrAllocator &Alloc;
1006   const uint32_t NumNodes;
1007 
1008   NodeToCallsMap(BumpPtrAllocator &Alloc, const FunctionDescription &D,
1009                  uint32_t NumNodes)
1010       : Alloc(Alloc), NumNodes(NumNodes) {
1011     Entries = new (Alloc, 0) MapEntry[NumNodes];
1012     for (int I = 0; I < D.NumCalls; ++I) {
1013       DEBUG(reportNumber("Registering call in node ", D.Calls[I].FromNode, 10));
1014       ++Entries[D.Calls[I].FromNode].NumCalls;
1015     }
1016     for (int I = 0; I < NumNodes; ++I) {
1017       Entries[I].Calls = Entries[I].NumCalls ? new (Alloc)
1018                                                    uint32_t[Entries[I].NumCalls]
1019                                              : nullptr;
1020       Entries[I].NumCalls = 0;
1021     }
1022     for (int I = 0; I < D.NumCalls; ++I) {
1023       MapEntry &Entry = Entries[D.Calls[I].FromNode];
1024       Entry.Calls[Entry.NumCalls++] = I;
1025     }
1026   }
1027 
1028   /// Set the frequency of all calls in node \p NodeID to Freq. However, if
1029   /// the calls have their own counters and do not depend on the basic block
1030   /// counter, this means they have landing pads and throw exceptions. In this
1031   /// case, set their frequency with their counters and return the maximum
1032   /// value observed in such counters. This will be used as the new frequency
1033   /// at basic block entry. This is used to fix the CFG edge frequencies in the
1034   /// presence of exceptions.
1035   uint64_t visitAllCallsIn(uint32_t NodeID, uint64_t Freq, uint64_t *CallFreqs,
1036                            const FunctionDescription &D,
1037                            const uint64_t *Counters,
1038                            ProfileWriterContext &Ctx) const {
1039     const MapEntry &Entry = Entries[NodeID];
1040     uint64_t MaxValue = 0ull;
1041     for (int I = 0, E = Entry.NumCalls; I != E; ++I) {
1042       const uint32_t CallID = Entry.Calls[I];
1043       DEBUG(reportNumber("  Setting freq for call ID: ", CallID, 10));
1044       const CallDescription &CallDesc = D.Calls[CallID];
1045       if (CallDesc.Counter == 0xffffffff) {
1046         CallFreqs[CallID] = Freq;
1047         DEBUG(reportNumber("  with : ", Freq, 10));
1048       } else {
1049         const uint64_t CounterVal = Counters[CallDesc.Counter];
1050         CallFreqs[CallID] = CounterVal;
1051         MaxValue = CounterVal > MaxValue ? CounterVal : MaxValue;
1052         DEBUG(reportNumber("  with (private counter) : ", CounterVal, 10));
1053       }
1054       DEBUG(reportNumber("  Address: 0x", CallDesc.TargetAddress, 16));
1055       if (CallFreqs[CallID] > 0)
1056         Ctx.CallFlowTable->get(CallDesc.TargetAddress).Calls +=
1057             CallFreqs[CallID];
1058     }
1059     return MaxValue;
1060   }
1061 
1062   ~NodeToCallsMap() {
1063     for (int I = NumNodes - 1; I >= 0; --I)
1064       if (Entries[I].Calls)
1065         Alloc.deallocate(Entries[I].Calls);
1066     Alloc.deallocate(Entries);
1067   }
1068 };
1069 
1070 /// Fill an array with the frequency of each edge in the function represented
1071 /// by G, as well as another array for each call.
1072 void Graph::computeEdgeFrequencies(const uint64_t *Counters,
1073                                    ProfileWriterContext &Ctx) {
1074   if (NumNodes == 0)
1075     return;
1076 
1077   EdgeFreqs = D.NumEdges ? new (Alloc, 0) uint64_t [D.NumEdges] : nullptr;
1078   CallFreqs = D.NumCalls ? new (Alloc, 0) uint64_t [D.NumCalls] : nullptr;
1079 
1080   // Set up a lookup for calls present in each node (BB)
1081   NodeToCallsMap *CallMap = new (Alloc) NodeToCallsMap(Alloc, D, NumNodes);
1082 
1083   // Perform a bottom-up, BFS traversal of the spanning tree in G. Edges in the
1084   // spanning tree don't have explicit counters. We must infer their value using
1085   // a linear combination of other counters (sum of counters of the outgoing
1086   // edges minus sum of counters of the incoming edges).
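  // A minimal worked sketch (hypothetical counts): suppose node X has two
  // outgoing edges with counters 5 and 3, so its inferred frequency is 8. If X
  // also has one instrumented incoming edge with counter 6 and one
  // uninstrumented (spanning tree) incoming edge from its parent, the traversal
  // below assigns that parent edge a frequency of 8 - 6 == 2. Spanning tree
  // leaves use their dedicated basic block counter as the node frequency
  // instead of summing outgoing edges.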
1087   uint32_t *Stack = new (Alloc) uint32_t [NumNodes];
1088   uint32_t StackTop = 0;
1089   enum Status : uint8_t { S_NEW = 0, S_VISITING, S_VISITED };
1090   Status *Visited = new (Alloc, 0) Status[NumNodes];
1091   uint64_t *LeafFrequency = new (Alloc, 0) uint64_t[NumNodes];
1092   uint64_t *EntryAddress = new (Alloc, 0) uint64_t[NumNodes];
1093 
1094   // Set up a fast lookup for the frequency of leaf nodes, which have special
1095   // basic block frequency instrumentation (they are not edge profiled).
1096   for (int I = 0; I < D.NumLeafNodes; ++I) {
1097     LeafFrequency[D.LeafNodes[I].Node] = Counters[D.LeafNodes[I].Counter];
1098     DEBUG({
1099       if (Counters[D.LeafNodes[I].Counter] > 0) {
1100         reportNumber("Leaf Node# ", D.LeafNodes[I].Node, 10);
1101         reportNumber("     Counter: ", Counters[D.LeafNodes[I].Counter], 10);
1102       }
1103     });
1104   }
1105   for (int I = 0; I < D.NumEntryNodes; ++I) {
1106     EntryAddress[D.EntryNodes[I].Node] = D.EntryNodes[I].Address;
1107     DEBUG({
1108         reportNumber("Entry Node# ", D.EntryNodes[I].Node, 10);
1109         reportNumber("      Address: ", D.EntryNodes[I].Address, 16);
1110     });
1111   }
1112   // Add all root nodes to the stack
1113   for (int I = 0; I < NumNodes; ++I)
1114     if (SpanningTreeNodes[I].NumInEdges == 0)
1115       Stack[StackTop++] = I;
1116 
1117   // Empty stack?
1118   if (StackTop == 0) {
1119     DEBUG(report("Empty stack!\n"));
1120     Alloc.deallocate(EntryAddress);
1121     Alloc.deallocate(LeafFrequency);
1122     Alloc.deallocate(Visited);
1123     Alloc.deallocate(Stack);
1124     CallMap->~NodeToCallsMap();
1125     Alloc.deallocate(CallMap);
1126     if (CallFreqs)
1127       Alloc.deallocate(CallFreqs);
1128     if (EdgeFreqs)
1129       Alloc.deallocate(EdgeFreqs);
1130     EdgeFreqs = nullptr;
1131     CallFreqs = nullptr;
1132     return;
1133   }
1134   // Add all known edge counts, will infer the rest
1135   for (int I = 0; I < D.NumEdges; ++I) {
1136     const uint32_t C = D.Edges[I].Counter;
1137     if (C == 0xffffffff) // inferred counter - we will compute its value
1138       continue;
1139     EdgeFreqs[I] = Counters[C];
1140   }
1141 
1142   while (StackTop > 0) {
1143     const uint32_t Cur = Stack[--StackTop];
1144     DEBUG({
1145       if (Visited[Cur] == S_VISITING)
1146         report("(visiting) ");
1147       else
1148         report("(new) ");
1149       reportNumber("Cur: ", Cur, 10);
1150     });
1151 
1152     // This shouldn't happen in a tree
1153     assert(Visited[Cur] != S_VISITED, "should not have visited nodes in stack");
1154     if (Visited[Cur] == S_NEW) {
1155       Visited[Cur] = S_VISITING;
1156       Stack[StackTop++] = Cur;
1157       assert(StackTop <= NumNodes, "stack grew too large");
1158       for (int I = 0, E = SpanningTreeNodes[Cur].NumOutEdges; I < E; ++I) {
1159         const uint32_t Succ = SpanningTreeNodes[Cur].OutEdges[I].Node;
1160         Stack[StackTop++] = Succ;
1161         assert(StackTop <= NumNodes, "stack grew too large");
1162       }
1163       continue;
1164     }
1165     Visited[Cur] = S_VISITED;
1166 
1167     // Establish our node frequency based on outgoing edges, which should all be
1168     // resolved by now.
1169     int64_t CurNodeFreq = LeafFrequency[Cur];
1170     // Not a leaf?
1171     if (!CurNodeFreq) {
1172       for (int I = 0, E = CFGNodes[Cur].NumOutEdges; I != E; ++I) {
1173         const uint32_t SuccEdge = CFGNodes[Cur].OutEdges[I].ID;
1174         CurNodeFreq += EdgeFreqs[SuccEdge];
1175       }
1176     }
1177     if (CurNodeFreq < 0)
1178       CurNodeFreq = 0;
1179 
1180     const uint64_t CallFreq = CallMap->visitAllCallsIn(
1181         Cur, CurNodeFreq > 0 ? CurNodeFreq : 0, CallFreqs, D, Counters, Ctx);
1182 
1183     // Exception handling affected our output flow? Fix with calls info
1184     DEBUG({
1185       if (CallFreq > CurNodeFreq)
1186         report("Bumping node frequency with call info\n");
1187     });
1188     CurNodeFreq = CallFreq > CurNodeFreq ? CallFreq : CurNodeFreq;
1189 
1190     if (CurNodeFreq > 0) {
1191       if (uint64_t Addr = EntryAddress[Cur]) {
1192         DEBUG(
1193             reportNumber("  Setting flow at entry point address 0x", Addr, 16));
1194         DEBUG(reportNumber("  with: ", CurNodeFreq, 10));
1195         Ctx.CallFlowTable->get(Addr).Val = CurNodeFreq;
1196       }
1197     }
1198 
1199     // No parent? Reached a tree root, limit to call frequency updating.
1200     if (SpanningTreeNodes[Cur].NumInEdges == 0)
1201       continue;
1202 
1203     assert(SpanningTreeNodes[Cur].NumInEdges == 1, "must have 1 parent");
1204     const uint32_t Parent = SpanningTreeNodes[Cur].InEdges[0].Node;
1205     const uint32_t ParentEdge = SpanningTreeNodes[Cur].InEdges[0].ID;
1206 
1207     // Calculate parent edge freq.
1208     int64_t ParentEdgeFreq = CurNodeFreq;
1209     for (int I = 0, E = CFGNodes[Cur].NumInEdges; I != E; ++I) {
1210       const uint32_t PredEdge = CFGNodes[Cur].InEdges[I].ID;
1211       ParentEdgeFreq -= EdgeFreqs[PredEdge];
1212     }
1213 
1214     // Sometimes the conservative CFG that BOLT builds will lead to incorrect
1215     // flow computation. For example, in a BB that transitively calls the exit
1216     // syscall, BOLT will add a fall-through successor even though it should not
1217     // have any successors. So this block's computed count will likely be wrong.
1218     // We tolerate this imperfection since this case should be quite infrequent.
1219     if (ParentEdgeFreq < 0) {
1220       DEBUG(dumpEdgeFreqs());
1221       DEBUG(report("WARNING: incorrect flow"));
1222       ParentEdgeFreq = 0;
1223     }
1224     DEBUG(reportNumber("  Setting freq for ParentEdge: ", ParentEdge, 10));
1225     DEBUG(reportNumber("  with ParentEdgeFreq: ", ParentEdgeFreq, 10));
1226     EdgeFreqs[ParentEdge] = ParentEdgeFreq;
1227   }
1228 
1229   Alloc.deallocate(EntryAddress);
1230   Alloc.deallocate(LeafFrequency);
1231   Alloc.deallocate(Visited);
1232   Alloc.deallocate(Stack);
1233   CallMap->~NodeToCallsMap();
1234   Alloc.deallocate(CallMap);
1235   DEBUG(dumpEdgeFreqs());
1236 }
1237 
1238 /// Write to \p FD all of the edge profiles for function \p FuncDesc. Uses
1239 /// \p Alloc to allocate helper dynamic structures used to compute profiles for
1240 /// edges that we do not explicitly instrument.
1241 const uint8_t *writeFunctionProfile(int FD, ProfileWriterContext &Ctx,
1242                                     const uint8_t *FuncDesc,
1243                                     BumpPtrAllocator &Alloc) {
1244   const FunctionDescription F(FuncDesc);
1245   const uint8_t *next = FuncDesc + F.getSize();
1246 
1247 #if !defined(__APPLE__)
1248   uint64_t *bolt_instr_locations = __bolt_instr_locations;
1249 #else
1250   uint64_t *bolt_instr_locations = _bolt_instr_locations_getter();
1251 #endif
1252 
1253   // Skip funcs we know are cold
1254 #ifndef ENABLE_DEBUG
1255   uint64_t CountersFreq = 0;
1256   for (int I = 0; I < F.NumLeafNodes; ++I)
1257     CountersFreq += bolt_instr_locations[F.LeafNodes[I].Counter];
1258 
1259   if (CountersFreq == 0) {
1260     for (int I = 0; I < F.NumEdges; ++I) {
1261       const uint32_t C = F.Edges[I].Counter;
1262       if (C == 0xffffffff)
1263         continue;
1264       CountersFreq += bolt_instr_locations[C];
1265     }
1266     if (CountersFreq == 0) {
1267       for (int I = 0; I < F.NumCalls; ++I) {
1268         const uint32_t C = F.Calls[I].Counter;
1269         if (C == 0xffffffff)
1270           continue;
1271         CountersFreq += bolt_instr_locations[C];
1272       }
1273       if (CountersFreq == 0)
1274         return next;
1275     }
1276   }
1277 #endif
1278 
1279   Graph *G = new (Alloc) Graph(Alloc, F, bolt_instr_locations, Ctx);
1280   DEBUG(G->dump());
1281 
1282   if (!G->EdgeFreqs && !G->CallFreqs) {
1283     G->~Graph();
1284     Alloc.deallocate(G);
1285     return next;
1286   }
1287 
1288   for (int I = 0; I < F.NumEdges; ++I) {
1289     const uint64_t Freq = G->EdgeFreqs[I];
1290     if (Freq == 0)
1291       continue;
1292     const EdgeDescription *Desc = &F.Edges[I];
1293     char LineBuf[BufSize];
1294     char *Ptr = LineBuf;
1295     Ptr = serializeLoc(Ctx, Ptr, Desc->From, BufSize);
1296     Ptr = serializeLoc(Ctx, Ptr, Desc->To, BufSize - (Ptr - LineBuf));
1297     Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 22);
1298     Ptr = intToStr(Ptr, Freq, 10);
1299     *Ptr++ = '\n';
1300     __write(FD, LineBuf, Ptr - LineBuf);
1301   }
1302 
1303   for (int I = 0; I < F.NumCalls; ++I) {
1304     const uint64_t Freq = G->CallFreqs[I];
1305     if (Freq == 0)
1306       continue;
1307     char LineBuf[BufSize];
1308     char *Ptr = LineBuf;
1309     const CallDescription *Desc = &F.Calls[I];
1310     Ptr = serializeLoc(Ctx, Ptr, Desc->From, BufSize);
1311     Ptr = serializeLoc(Ctx, Ptr, Desc->To, BufSize - (Ptr - LineBuf));
1312     Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
1313     Ptr = intToStr(Ptr, Freq, 10);
1314     *Ptr++ = '\n';
1315     __write(FD, LineBuf, Ptr - LineBuf);
1316   }
1317 
1318   G->~Graph();
1319   Alloc.deallocate(G);
1320   return next;
1321 }
1322 
1323 #if !defined(__APPLE__)
1324 const IndCallTargetDescription *
1325 ProfileWriterContext::lookupIndCallTarget(uint64_t Target) const {
1326   uint32_t B = 0;
1327   uint32_t E = __bolt_instr_num_ind_targets;
1328   if (E == 0)
1329     return nullptr;
1330   do {
1331     uint32_t I = (E - B) / 2 + B;
1332     if (IndCallTargets[I].Address == Target)
1333       return &IndCallTargets[I];
1334     if (IndCallTargets[I].Address < Target)
1335       B = I + 1;
1336     else
1337       E = I;
1338   } while (B < E);
1339   return nullptr;
1340 }
1341 
1342 /// Write a single indirect call <src, target> pair to the fdata file
1343 void visitIndCallCounter(IndirectCallHashTable::MapEntry &Entry,
1344                          int FD, int CallsiteID,
1345                          ProfileWriterContext *Ctx) {
1346   if (Entry.Val == 0)
1347     return;
1348   DEBUG(reportNumber("Target func 0x", Entry.Key, 16));
1349   DEBUG(reportNumber("Target freq: ", Entry.Val, 10));
1350   const IndCallDescription *CallsiteDesc =
1351       &Ctx->IndCallDescriptions[CallsiteID];
1352   const IndCallTargetDescription *TargetDesc =
1353       Ctx->lookupIndCallTarget(Entry.Key);
1354   if (!TargetDesc) {
1355     DEBUG(report("Failed to lookup indirect call target\n"));
1356     char LineBuf[BufSize];
1357     char *Ptr = LineBuf;
1358     Ptr = serializeLoc(*Ctx, Ptr, *CallsiteDesc, BufSize);
1359     Ptr = strCopy(Ptr, "0 [unknown] 0 0 ", BufSize - (Ptr - LineBuf) - 40);
1360     Ptr = intToStr(Ptr, Entry.Val, 10);
1361     *Ptr++ = '\n';
1362     __write(FD, LineBuf, Ptr - LineBuf);
1363     return;
1364   }
1365   Ctx->CallFlowTable->get(TargetDesc->Address).Calls += Entry.Val;
1366   char LineBuf[BufSize];
1367   char *Ptr = LineBuf;
1368   Ptr = serializeLoc(*Ctx, Ptr, *CallsiteDesc, BufSize);
1369   Ptr = serializeLoc(*Ctx, Ptr, TargetDesc->Loc, BufSize - (Ptr - LineBuf));
1370   Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
1371   Ptr = intToStr(Ptr, Entry.Val, 10);
1372   *Ptr++ = '\n';
1373   __write(FD, LineBuf, Ptr - LineBuf);
1374 }
1375 
1376 /// Write to \p FD all of the indirect call profiles.
1377 void writeIndirectCallProfile(int FD, ProfileWriterContext &Ctx) {
1378   for (int I = 0; I < __bolt_instr_num_ind_calls; ++I) {
1379     DEBUG(reportNumber("IndCallsite #", I, 10));
1380     GlobalIndCallCounters[I].forEachElement(visitIndCallCounter, FD, I, &Ctx);
1381   }
1382 }
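// Example for illustration only: assuming serializeLoc() prints a location as
// "<1|0> <symbol> <offset> ", a resolved indirect call above would yield an
// fdata line of the form
//   1 main 10 1 targetFunc 0 0 42
// and an unresolved target would fall back to
//   1 main 10 0 [unknown] 0 0 42
// where the last two fields are the misprediction count (always 0 here) and
// the observed call frequency; "main", "targetFunc", and the offsets are
// hypothetical.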
1383 
1384 /// Check a single call flow for a callee versus all known callers. If there are
1385 /// fewer callers than the callee expects, write the difference with source
1386 /// [unknown] in the profile.
1387 void visitCallFlowEntry(CallFlowHashTable::MapEntry &Entry, int FD,
1388                         ProfileWriterContext *Ctx) {
1389   DEBUG(reportNumber("Call flow entry address: 0x", Entry.Key, 16));
1390   DEBUG(reportNumber("Calls: ", Entry.Calls, 10));
1391   DEBUG(reportNumber("Reported entry frequency: ", Entry.Val, 10));
1392   DEBUG({
1393     if (Entry.Calls > Entry.Val)
1394       report("  More calls than expected!\n");
1395   });
1396   if (Entry.Val <= Entry.Calls)
1397     return;
1398   DEBUG(reportNumber(
1399       "  Balancing calls with traffic: ", Entry.Val - Entry.Calls, 10));
1400   const IndCallTargetDescription *TargetDesc =
1401       Ctx->lookupIndCallTarget(Entry.Key);
1402   if (!TargetDesc) {
1403     // There is probably something wrong with this callee and this should be
1404     // investigated, but I don't want to assert and lose all data collected.
1405     DEBUG(report("WARNING: failed to look up call target!\n"));
1406     return;
1407   }
1408   char LineBuf[BufSize];
1409   char *Ptr = LineBuf;
1410   Ptr = strCopy(Ptr, "0 [unknown] 0 ", BufSize);
1411   Ptr = serializeLoc(*Ctx, Ptr, TargetDesc->Loc, BufSize - (Ptr - LineBuf));
1412   Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
1413   Ptr = intToStr(Ptr, Entry.Val - Entry.Calls, 10);
1414   *Ptr++ = '\n';
1415   __write(FD, LineBuf, Ptr - LineBuf);
1416 }
1417 
1418 /// Open the fdata file for writing and return a valid file descriptor,
1419 /// aborting the program upon failure.
1420 int openProfile() {
1421   // Build the profile name string, appending our PID if requested
1422   char Buf[BufSize];
1423   char *Ptr = Buf;
1424   uint64_t PID = __getpid();
1425   Ptr = strCopy(Buf, __bolt_instr_filename, BufSize);
1426   if (__bolt_instr_use_pid) {
1427     Ptr = strCopy(Ptr, ".", BufSize - (Ptr - Buf + 1));
1428     Ptr = intToStr(Ptr, PID, 10);
1429     Ptr = strCopy(Ptr, ".fdata", BufSize - (Ptr - Buf + 1));
1430   }
1431   *Ptr++ = '\0';
1432   uint64_t FD = __open(Buf,
1433                        /*flags=*/0x241 /*O_WRONLY|O_TRUNC|O_CREAT*/,
1434                        /*mode=*/0666);
1435   if (static_cast<int64_t>(FD) < 0) {
1436     report("Error while trying to open profile file for writing: ");
1437     report(Buf);
1438     reportNumber("\nFailed with error number: 0x",
1439                  0 - static_cast<int64_t>(FD), 16);
1440     __exit(1);
1441   }
1442   return FD;
1443 }
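// For illustration (hypothetical values): with __bolt_instr_filename set to
// "/tmp/prof.fdata", __bolt_instr_use_pid enabled, and PID 1234, the code
// above opens "/tmp/prof.fdata.1234.fdata"; with use_pid disabled the name is
// used as-is.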
1444 
1445 #endif
1446 
1447 } // anonymous namespace
1448 
1449 #if !defined(__APPLE__)
1450 
1451 /// Reset all counters in case you want to start profiling a new phase of your
1452 /// program independently of prior phases.
1453 /// The address of this function is printed by BOLT so it can be called by
1454 /// any attached debugger at runtime. A useful one-liner for gdb:
1455 ///
1456 ///   gdb -p $(pgrep -xo PROCESSNAME) -ex 'p ((void(*)())0xdeadbeef)()' \
1457 ///     -ex 'set confirm off' -ex quit
1458 ///
1459 /// Where 0xdeadbeef is this function's address and PROCESSNAME is your
1460 /// binary's file name.
1461 extern "C" void __bolt_instr_clear_counters() {
1462   memset(reinterpret_cast<char *>(__bolt_instr_locations), 0,
1463          __bolt_num_counters * 8);
1464   for (int I = 0; I < __bolt_instr_num_ind_calls; ++I)
1465     GlobalIndCallCounters[I].resetCounters();
1466 }
1467 
1468 /// This is the entry point for profile writing.
1469 /// There are three ways of getting here:
1470 ///
1471 ///  * Program execution ended, finalization methods are running and BOLT
1472 ///    hooked into FINI from your binary dynamic section;
1473 ///  * You used the sleep timer option and during initialization we forked
1474 ///    a separate process that will call this function periodically;
1475 ///  * BOLT prints this function address so you can attach a debugger and
1476 ///    call this function directly to get your profile written to disk
1477 ///    on demand.
1478 ///
1479 extern "C" void __attribute((force_align_arg_pointer))
1480 __bolt_instr_data_dump() {
1481   // Already dumping
1482   if (!GlobalWriteProfileMutex->acquire())
1483     return;
1484 
1485   BumpPtrAllocator HashAlloc;
1486   HashAlloc.setMaxSize(0x6400000);
1487   ProfileWriterContext Ctx = readDescriptions();
1488   Ctx.CallFlowTable = new (HashAlloc, 0) CallFlowHashTable(HashAlloc);
1489 
1490   DEBUG(printStats(Ctx));
1491 
1492   int FD = openProfile();
1493 
1494   BumpPtrAllocator Alloc;
1495   Alloc.setMaxSize(0x6400000);
1496   const uint8_t *FuncDesc = Ctx.FuncDescriptions;
1497   for (int I = 0, E = __bolt_instr_num_funcs; I < E; ++I) {
1498     FuncDesc = writeFunctionProfile(FD, Ctx, FuncDesc, Alloc);
1499     Alloc.clear();
1500     DEBUG(reportNumber("FuncDesc now: ", (uint64_t)FuncDesc, 16));
1501   }
1502   assert(FuncDesc == (void *)Ctx.Strings,
1503          "FuncDesc ptr must be equal to stringtable");
1504 
1505   writeIndirectCallProfile(FD, Ctx);
1506   Ctx.CallFlowTable->forEachElement(visitCallFlowEntry, FD, &Ctx);
1507 
1508   __fsync(FD);
1509   __close(FD);
1510   __munmap(Ctx.MMapPtr, Ctx.MMapSize);
1511   __close(Ctx.FileDesc);
1512   HashAlloc.destroy();
1513   GlobalWriteProfileMutex->release();
1514   DEBUG(report("Finished writing profile.\n"));
1515 }
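// For an on-demand dump from a live process, the gdb one-liner shown above for
// __bolt_instr_clear_counters can be reused with the address of
// __bolt_instr_data_dump that BOLT prints, e.g. (0xdeadbeef is a placeholder):
//
//   gdb -p $(pgrep -xo PROCESSNAME) -ex 'p ((void(*)())0xdeadbeef)()' \
//     -ex 'set confirm off' -ex quit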
1516 
1517 /// Event loop for our child process spawned during setup to dump profile data
1518 /// at user-specified intervals
1519 void watchProcess() {
1520   timespec ts, rem;
1521   uint64_t Elapsed = 0ull;
1522   uint64_t ppid;
1523   if (__bolt_instr_wait_forks) {
1524     // Store negated pgid so __kill() checks the whole process group
1525     ppid = -__getpgid(0);
1526     // And leave parent process group
1527     __setpgid(0, 0);
1528   } else {
1529     // Store parent pid
1530     ppid = __getppid();
1531     if (ppid == 1) {
1532       // Parent already dead
1533       __bolt_instr_data_dump();
1534       goto out;
1535     }
1536   }
1537 
1538   ts.tv_sec = 1;
1539   ts.tv_nsec = 0;
1540   while (1) {
1541     __nanosleep(&ts, &rem);
1542     // This means our parent process or all its forks are dead,
1543     // so no need for us to keep dumping.
1544     if (__kill(ppid, 0) < 0) {
1545       if (__bolt_instr_no_counters_clear)
1546         __bolt_instr_data_dump();
1547       break;
1548     }
1549 
1550     if (++Elapsed < __bolt_instr_sleep_time)
1551       continue;
1552 
1553     Elapsed = 0;
1554     __bolt_instr_data_dump();
1555     if (!__bolt_instr_no_counters_clear)
1556       __bolt_instr_clear_counters();
1557   }
1558 
1559 out:;
1560   DEBUG(report("My parent process is dead, bye!\n"));
1561   __exit(0);
1562 }
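// For example (hypothetical setting): with __bolt_instr_sleep_time set to 30,
// the watcher wakes every second, rewrites the profile roughly every 30
// seconds (clearing counters afterwards unless that is disabled), and exits
// once the watched process or process group is gone.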
1563 
1564 extern "C" void __bolt_instr_indirect_call();
1565 extern "C" void __bolt_instr_indirect_tailcall();
1566 
1567 /// Initialization code
1568 extern "C" void __attribute((force_align_arg_pointer)) __bolt_instr_setup() {
1569   __bolt_ind_call_counter_func_pointer = __bolt_instr_indirect_call;
1570   __bolt_ind_tailcall_counter_func_pointer = __bolt_instr_indirect_tailcall;
1571 
1572   const uint64_t CountersStart =
1573       reinterpret_cast<uint64_t>(&__bolt_instr_locations[0]);
1574   const uint64_t CountersEnd = alignTo(
1575       reinterpret_cast<uint64_t>(&__bolt_instr_locations[__bolt_num_counters]),
1576       0x1000);
1577   DEBUG(reportNumber("replace mmap start: ", CountersStart, 16));
1578   DEBUG(reportNumber("replace mmap stop: ", CountersEnd, 16));
1579   assert(CountersEnd > CountersStart, "no counters");
1580 
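  // Note on the mapping mode below: with per-PID profiles
  // (__bolt_instr_use_pid) each forked process keeps a private copy of the
  // counters (MAP_PRIVATE); otherwise MAP_SHARED lets parent and forked
  // children accumulate into the same counter memory.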
1581   const bool Shared = !__bolt_instr_use_pid;
1582   const uint64_t MapPrivateOrShared = Shared ? MAP_SHARED : MAP_PRIVATE;
1583 
1584   void *Ret =
1585       __mmap(CountersStart, CountersEnd - CountersStart, PROT_READ | PROT_WRITE,
1586              MAP_ANONYMOUS | MapPrivateOrShared | MAP_FIXED, -1, 0);
1587   assert(Ret != MAP_FAILED, "__bolt_instr_setup: Failed to mmap counters!");
1588 
1589   GlobalMetadataStorage = __mmap(0, 4096, PROT_READ | PROT_WRITE,
1590                                  MapPrivateOrShared | MAP_ANONYMOUS, -1, 0);
1591   assert(GlobalMetadataStorage != MAP_FAILED,
1592          "__bolt_instr_setup: failed to mmap page for metadata!");
1593 
1594   GlobalAlloc = new (GlobalMetadataStorage) BumpPtrAllocator;
1595   // Conservatively reserve 100MiB
1596   GlobalAlloc->setMaxSize(0x6400000);
1597   GlobalAlloc->setShared(Shared);
1598   GlobalWriteProfileMutex = new (*GlobalAlloc, 0) Mutex();
1599   if (__bolt_instr_num_ind_calls > 0)
1600     GlobalIndCallCounters =
1601         new (*GlobalAlloc, 0) IndirectCallHashTable[__bolt_instr_num_ind_calls];
1602 
1603   if (__bolt_instr_sleep_time != 0) {
1604     // Move the instrumented process into its own process group
1605     if (__bolt_instr_wait_forks)
1606       __setpgid(0, 0);
1607 
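    // The parent returns to normal execution; the forked child becomes the
    // watcher that periodically dumps the profile.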
1608     if (long PID = __fork())
1609       return;
1610     watchProcess();
1611   }
1612 }
1613 
1614 extern "C" __attribute((force_align_arg_pointer)) void
1615 instrumentIndirectCall(uint64_t Target, uint64_t IndCallID) {
1616   GlobalIndCallCounters[IndCallID].incrementVal(Target, *GlobalAlloc);
1617 }
1618 
1619 /// We receive as in-stack arguments the identifier of the indirect call site
1620 /// as well as the target address for the call
1621 extern "C" __attribute((naked)) void __bolt_instr_indirect_call()
1622 {
1623   __asm__ __volatile__(SAVE_ALL
1624                        "mov 0xa0(%%rsp), %%rdi\n"
1625                        "mov 0x98(%%rsp), %%rsi\n"
1626                        "call instrumentIndirectCall\n"
1627                        RESTORE_ALL
1628                        "ret\n"
1629                        :::);
1630 }
1631 
1632 extern "C" __attribute((naked)) void __bolt_instr_indirect_tailcall()
1633 {
1634   __asm__ __volatile__(SAVE_ALL
1635                        "mov 0x98(%%rsp), %%rdi\n"
1636                        "mov 0x90(%%rsp), %%rsi\n"
1637                        "call instrumentIndirectCall\n"
1638                        RESTORE_ALL
1639                        "ret\n"
1640                        :::);
1641 }
1642 
1643 /// This hooks the ELF entry point; it needs to save all machine state.
1644 extern "C" __attribute((naked)) void __bolt_instr_start()
1645 {
1646   __asm__ __volatile__(SAVE_ALL
1647                        "call __bolt_instr_setup\n"
1648                        RESTORE_ALL
1649                        "jmp __bolt_start_trampoline\n"
1650                        :::);
1651 }
1652 
1653 /// This hooks into ELF's DT_FINI.
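/// When the sleep timer is in use, the forked watcher process owns profile
/// dumping, so the final dump below runs only when no watcher was spawned.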
1654 extern "C" void __bolt_instr_fini() {
1655   __bolt_fini_trampoline();
1656   if (__bolt_instr_sleep_time == 0)
1657     __bolt_instr_data_dump();
1658   DEBUG(report("Finished.\n"));
1659 }
1660 
1661 #endif
1662 
1663 #if defined(__APPLE__)
1664 
1665 extern "C" void __bolt_instr_data_dump() {
1666   ProfileWriterContext Ctx = readDescriptions();
1667 
1668   int FD = 2;
1669   BumpPtrAllocator Alloc;
1670   const uint8_t *FuncDesc = Ctx.FuncDescriptions;
1671   uint32_t bolt_instr_num_funcs = _bolt_instr_num_funcs_getter();
1672 
1673   for (int I = 0, E = bolt_instr_num_funcs; I < E; ++I) {
1674     FuncDesc = writeFunctionProfile(FD, Ctx, FuncDesc, Alloc);
1675     Alloc.clear();
1676     DEBUG(reportNumber("FuncDesc now: ", (uint64_t)FuncDesc, 16));
1677   }
1678   assert(FuncDesc == (void *)Ctx.Strings,
1679          "FuncDesc ptr must be equal to stringtable");
1680 }
1681 
1682 // On OSX/iOS the final symbol name of an extern "C" function/variable contains
1683 // one extra leading underscore: _bolt_instr_setup -> __bolt_instr_setup.
1684 extern "C"
1685 __attribute__((section("__TEXT,__setup")))
1686 __attribute__((force_align_arg_pointer))
1687 void _bolt_instr_setup() {
1688   __asm__ __volatile__(SAVE_ALL :::);
1689 
1690   report("Hello!\n");
1691 
1692   __asm__ __volatile__(RESTORE_ALL :::);
1693 }
1694 
1695 extern "C"
1696 __attribute__((section("__TEXT,__fini")))
1697 __attribute__((force_align_arg_pointer))
1698 void _bolt_instr_fini() {
1699   report("Bye!\n");
1700   __bolt_instr_data_dump();
1701 }
1702 
1703 #endif
1704 #endif
1705