1 //===- bolt/runtime/instr.cpp ---------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // BOLT runtime instrumentation library for x86 Linux. Currently, BOLT does
10 // not support linking modules with dependencies on one another into the final
11 // binary (TODO?), which means this library has to be self-contained in a single
12 // module.
13 //
14 // All extern declarations here need to be defined by BOLT itself. Those will be
15 // undefined symbols that BOLT needs to resolve by emitting these symbols with
16 // MCStreamer. Currently, Passes/Instrumentation.cpp is the pass responsible
17 // for defining the symbols here, and these two files are tightly coupled: one
18 // works statically when you run BOLT and the other during program runtime when
19 // you run an instrumented binary. The main goal here is to output an fdata file
20 // (BOLT profile) with the instrumentation counters inserted by the static pass.
21 // Counters for indirect calls are an exception, as we can't know them
22 // statically. These counters are created and managed here. To allow this, we
23 // need a minimal framework for allocating memory dynamically. We provide this
24 // with the BumpPtrAllocator class (not LLVM's, but our own version of it).
25 //
26 // Since this code is intended to be inserted into any executable, we decided to
27 // make it standalone and not depend on any external libraries (i.e. language-
28 // support libraries such as glibc or libstdc++). To allow this, we provide a few
29 // light implementations of common OS-interaction functionality using direct
30 // syscall wrappers. Our simple allocator doesn't manage deallocations that
31 // fragment the memory space, so it's stack based. This is the minimal framework
32 // provided here to allow processing instrumented counters and writing fdata.
33 //
34 // In the C++ idiom used here, we never use or rely on constructors or
35 // destructors for global objects. That's because those need support from the
36 // linker in initialization/finalization code, and we want to keep our linker
37 // very simple. Similarly, we don't create any global objects that are zero
38 // initialized, since those would need to go into .bss, which our simple linker
39 // also doesn't support (TODO?).
40 //
41 //===----------------------------------------------------------------------===//
42 
43 #if defined (__x86_64__)
44 #include "common.h"
45 
46 // Enables very verbose logging to stderr, useful when debugging
47 //#define ENABLE_DEBUG
48 
49 #ifdef ENABLE_DEBUG
50 #define DEBUG(X)                                                               \
51   { X; }
52 #else
53 #define DEBUG(X)                                                               \
54   {}
55 #endif
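// Typical usage (illustrative): wrap debug-only reporting so it compiles away
// when ENABLE_DEBUG is not defined, e.g.
//   DEBUG(reportNumber("NumCounters = ", __bolt_num_counters, 10));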
56 
57 #pragma GCC visibility push(hidden)
58 
59 extern "C" {
60 
61 #if defined(__APPLE__)
62 extern uint64_t* _bolt_instr_locations_getter();
63 extern uint32_t _bolt_num_counters_getter();
64 
65 extern uint8_t* _bolt_instr_tables_getter();
66 extern uint32_t _bolt_instr_num_funcs_getter();
67 
68 #else
69 
70 // Main counters inserted by instrumentation, incremented during runtime when
71 // points of interest (locations) in the program are reached. Those are direct
72 // calls and direct and indirect branches (local ones). There are also counters
73 // for basic block execution if the block is a spanning tree leaf and needs to
74 // be counted in order to infer the execution count of other edges of the CFG.
75 extern uint64_t __bolt_instr_locations[];
76 extern uint32_t __bolt_num_counters;
77 // Descriptions are serialized metadata about binary functions written by BOLT,
78 // so we have a minimal understanding of the program structure. For a
79 // reference on the exact format of this metadata, see *Description structs,
80 // Location, InstrumentedNode and EntryNode.
81 // Number of indirect call site descriptions
82 extern uint32_t __bolt_instr_num_ind_calls;
83 // Number of indirect call target descriptions
84 extern uint32_t __bolt_instr_num_ind_targets;
85 // Number of function descriptions
86 extern uint32_t __bolt_instr_num_funcs;
87 // Time to sleep across dumps (when we write the fdata profile to disk)
88 extern uint32_t __bolt_instr_sleep_time;
89 // Do not clear counters across dumps, rewrite file with the updated values
90 extern bool __bolt_instr_no_counters_clear;
91 // Wait until all forks of the instrumented process finish
92 extern bool __bolt_instr_wait_forks;
93 // Filename to dump data to
94 extern char __bolt_instr_filename[];
95 // Instrumented binary file path
96 extern char __bolt_instr_binpath[];
97 // If true, append current PID to the fdata filename when creating it so
98 // different invocations of the same program can be differentiated.
99 extern bool __bolt_instr_use_pid;
100 // Function pointers used to instrument indirect calls. The BOLT static pass
101 // identifies indirect calls and modifies them to load the address stored in
102 // these trampolines and call it instead. BOLT can't use direct calls to
103 // our handlers because our addresses here are not known at analysis time. We
104 // only support resolving dependencies from this file to the output of BOLT,
105 // *not* the other way around.
106 // TODO: We need better linking support to make that happen.
107 extern void (*__bolt_ind_call_counter_func_pointer)();
108 extern void (*__bolt_ind_tailcall_counter_func_pointer)();
109 // Function pointers to init/fini trampoline routines in the binary, so we can
110 // resume regular execution of these functions that we hooked
111 extern void __bolt_start_trampoline();
112 extern void __bolt_fini_trampoline();
113 
114 #endif
115 }
116 
117 namespace {
118 
119 /// A simple allocator that mmaps a fixed size region and manages this space
120 /// in a stack fashion, meaning you always deallocate the last element that
121 /// was allocated. In practice, we don't need to deallocate individual elements.
122 /// We monotonically increase our usage and then deallocate everything once we
123 /// are done processing something.
124 class BumpPtrAllocator {
125   /// This is written before each allocation and acts as a canary to detect when
126   /// a bug caused our program to cross allocation boundaries.
127   struct EntryMetadata {
128     uint64_t Magic;
129     uint64_t AllocSize;
130   };
131 
132 public:
133   void *allocate(size_t Size) {
134     Lock L(M);
135 
136     if (StackBase == nullptr) {
137       StackBase = reinterpret_cast<uint8_t *>(
138           __mmap(0, MaxSize, PROT_READ | PROT_WRITE,
139                  (Shared ? MAP_SHARED : MAP_PRIVATE) | MAP_ANONYMOUS, -1, 0));
140       assert(StackBase != MAP_FAILED,
141              "BumpPtrAllocator: failed to mmap stack!");
142       StackSize = 0;
143     }
144 
145     Size = alignTo(Size + sizeof(EntryMetadata), 16);
146     uint8_t *AllocAddress = StackBase + StackSize + sizeof(EntryMetadata);
147     auto *M = reinterpret_cast<EntryMetadata *>(StackBase + StackSize);
148     M->Magic = Magic;
149     M->AllocSize = Size;
150     StackSize += Size;
151     assert(StackSize < MaxSize, "allocator ran out of memory");
152     return AllocAddress;
153   }
154 
155 #ifdef DEBUG
156   /// Element-wise deallocation is only used for debugging to catch memory
157   /// bugs by checking magic bytes. Ordinarily, we reset the allocator once
158   /// we are done with it. Reset is done with clear(). There's no need
159   /// to deallocate each element individually.
160   void deallocate(void *Ptr) {
161     Lock L(M);
162     uint8_t MetadataOffset = sizeof(EntryMetadata);
163     auto *M = reinterpret_cast<EntryMetadata *>(
164         reinterpret_cast<uint8_t *>(Ptr) - MetadataOffset);
165     const uint8_t *StackTop = StackBase + StackSize + MetadataOffset;
166     // Validate size
167     if (Ptr != StackTop - M->AllocSize) {
168       // Failed validation, check if it is a pointer returned by operator new []
169       MetadataOffset +=
170           sizeof(uint64_t); // Space for number of elements alloc'ed
171       M = reinterpret_cast<EntryMetadata *>(reinterpret_cast<uint8_t *>(Ptr) -
172                                             MetadataOffset);
173       // Ok, it failed both checks if this assertion fails. Stop the program, we
174       // have a memory bug.
175       assert(Ptr == StackTop - M->AllocSize,
176              "must deallocate the last element alloc'ed");
177     }
178     assert(M->Magic == Magic, "allocator magic is corrupt");
179     StackSize -= M->AllocSize;
180   }
181 #else
182   void deallocate(void *) {}
183 #endif
184 
185   void clear() {
186     Lock L(M);
187     StackSize = 0;
188   }
189 
190   /// Set mmap reservation size (only relevant before first allocation)
191   void setMaxSize(uint64_t Size) { MaxSize = Size; }
192 
193   /// Set mmap reservation privacy (only relevant before first allocation)
194   void setShared(bool S) { Shared = S; }
195 
196   void destroy() {
197     if (StackBase == nullptr)
198       return;
199     __munmap(StackBase, MaxSize);
200   }
201 
202 private:
203   static constexpr uint64_t Magic = 0x1122334455667788ull;
204   uint64_t MaxSize = 0xa00000;
205   uint8_t *StackBase{nullptr};
206   uint64_t StackSize{0};
207   bool Shared{false};
208   Mutex M;
209 };
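// Sketch of the layout produced by allocate() for each request (illustrative):
//
//   [ EntryMetadata { Magic, AllocSize } ][ payload returned to the caller ]
//
// written at the current top of the stack, with metadata + payload rounded up
// to a 16-byte multiple before StackSize is bumped.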
210 
211 /// Used for allocating indirect call instrumentation counters. Initialized by
212 /// __bolt_instr_setup, our initialization routine.
213 BumpPtrAllocator GlobalAlloc;
214 } // anonymous namespace
215 
216 // User-defined placement new operators. We only use those (as opposed to
217 // overriding the regular operator new) so we can keep our allocator on the
218 // stack instead of in a data section (global).
219 void *operator new(size_t Sz, BumpPtrAllocator &A) { return A.allocate(Sz); }
220 void *operator new(size_t Sz, BumpPtrAllocator &A, char C) {
221   auto *Ptr = reinterpret_cast<char *>(A.allocate(Sz));
222   memset(Ptr, C, Sz);
223   return Ptr;
224 }
225 void *operator new[](size_t Sz, BumpPtrAllocator &A) {
226   return A.allocate(Sz);
227 }
228 void *operator new[](size_t Sz, BumpPtrAllocator &A, char C) {
229   auto *Ptr = reinterpret_cast<char *>(A.allocate(Sz));
230   memset(Ptr, C, Sz);
231   return Ptr;
232 }
233 // Only called during exception unwinding (useless). We must manually dealloc.
234 // C++ language weirdness
235 void operator delete(void *Ptr, BumpPtrAllocator &A) { A.deallocate(Ptr); }
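// Example usage (illustrative; hypothetical variable names), as done elsewhere
// in this file -- the extra char argument fills the allocation with that byte,
// so passing 0 zero-initializes it:
//   uint64_t *Freqs = new (GlobalAlloc, 0) uint64_t[NumEdges];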
236 
237 namespace {
238 
239 // Disable instrumentation optimizations that sacrifice profile accuracy
240 extern "C" bool __bolt_instr_conservative;
241 
242 /// Basic key-val atom stored in our hash
243 struct SimpleHashTableEntryBase {
244   uint64_t Key;
245   uint64_t Val;
246 };
247 
248 /// This hash table implementation starts by allocating a table of size
249 /// InitialSize. When conflicts happen in this main table, it resolves
250 /// them by chaining a new table of size IncSize. It never reallocs as our
251 /// allocator doesn't support it. Keys are intended to be function pointers.
252 /// There's no clever hash function (it's just x mod size, size being prime).
253 /// I never tuned the coefficients in the modular equation (TODO).
254 /// This is used for indirect calls (each call site has one of these, so it
255 /// should have a small footprint) and for tallying call counts globally for
256 /// each target to check if we missed the origin of some calls (this one is a
257 /// large instantiation of this template, since it is global for all call sites)
258 template <typename T = SimpleHashTableEntryBase, uint32_t InitialSize = 7,
259           uint32_t IncSize = 7>
260 class SimpleHashTable {
261 public:
262   using MapEntry = T;
263 
264   /// Increment the value of \p Key by 1. If it is not in this table, it will be
265   /// added to the table and its value set to 1.
266   void incrementVal(uint64_t Key, BumpPtrAllocator &Alloc) {
267     ++get(Key, Alloc).Val;
268   }
269 
270   /// Basic member-access interface. Here we pass the allocator explicitly to
271   /// avoid storing a pointer to it as part of this table (remember there is one
272   /// hash for each indirect call site, so we want to minimize our footprint).
273   MapEntry &get(uint64_t Key, BumpPtrAllocator &Alloc) {
274     if (!__bolt_instr_conservative) {
275       TryLock L(M);
276       if (!L.isLocked())
277         return NoEntry;
278       return getOrAllocEntry(Key, Alloc);
279     }
280     Lock L(M);
281     return getOrAllocEntry(Key, Alloc);
282   }
283 
284   /// Traverses all elements in the table
285   template <typename... Args>
286   void forEachElement(void (*Callback)(MapEntry &, Args...), Args... args) {
287     Lock L(M);
288     if (!TableRoot)
289       return;
290     return forEachElement(Callback, InitialSize, TableRoot, args...);
291   }
292 
293   void resetCounters();
294 
295 private:
296   constexpr static uint64_t VacantMarker = 0;
297   constexpr static uint64_t FollowUpTableMarker = 0x8000000000000000ull;
298 
299   MapEntry *TableRoot{nullptr};
300   MapEntry NoEntry;
301   Mutex M;
302 
303   template <typename... Args>
304   void forEachElement(void (*Callback)(MapEntry &, Args...),
305                       uint32_t NumEntries, MapEntry *Entries, Args... args) {
306     for (uint32_t I = 0; I < NumEntries; ++I) {
307       MapEntry &Entry = Entries[I];
308       if (Entry.Key == VacantMarker)
309         continue;
310       if (Entry.Key & FollowUpTableMarker) {
311         forEachElement(Callback, IncSize,
312                        reinterpret_cast<MapEntry *>(Entry.Key &
313                                                     ~FollowUpTableMarker),
314                        args...);
315         continue;
316       }
317       Callback(Entry, args...);
318     }
319   }
320 
321   MapEntry &firstAllocation(uint64_t Key, BumpPtrAllocator &Alloc) {
322     TableRoot = new (Alloc, 0) MapEntry[InitialSize];
323     MapEntry &Entry = TableRoot[Key % InitialSize];
324     Entry.Key = Key;
325     return Entry;
326   }
327 
328   MapEntry &getEntry(MapEntry *Entries, uint64_t Key, uint64_t Selector,
329                      BumpPtrAllocator &Alloc, int CurLevel) {
330     const uint32_t NumEntries = CurLevel == 0 ? InitialSize : IncSize;
331     uint64_t Remainder = Selector / NumEntries;
332     Selector = Selector % NumEntries;
333     MapEntry &Entry = Entries[Selector];
334 
335     // A hit
336     if (Entry.Key == Key) {
337       return Entry;
338     }
339 
340     // Vacant - add new entry
341     if (Entry.Key == VacantMarker) {
342       Entry.Key = Key;
343       return Entry;
344     }
345 
346     // Defer to the next level
347     if (Entry.Key & FollowUpTableMarker) {
348       return getEntry(
349           reinterpret_cast<MapEntry *>(Entry.Key & ~FollowUpTableMarker),
350           Key, Remainder, Alloc, CurLevel + 1);
351     }
352 
353     // Conflict - create the next level
354     MapEntry *NextLevelTbl = new (Alloc, 0) MapEntry[IncSize];
355     uint64_t CurEntrySelector = Entry.Key / InitialSize;
356     for (int I = 0; I < CurLevel; ++I)
357       CurEntrySelector /= IncSize;
358     CurEntrySelector = CurEntrySelector % IncSize;
359     NextLevelTbl[CurEntrySelector] = Entry;
360     Entry.Key = reinterpret_cast<uint64_t>(NextLevelTbl) | FollowUpTableMarker;
361     return getEntry(NextLevelTbl, Key, Remainder, Alloc, CurLevel + 1);
362   }
363 
364   MapEntry &getOrAllocEntry(uint64_t Key, BumpPtrAllocator &Alloc) {
365     if (TableRoot)
366       return getEntry(TableRoot, Key, Key, Alloc, 0);
367     return firstAllocation(Key, Alloc);
368   }
369 };
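// Lookup sketch (illustrative): with InitialSize == IncSize == 7, a key K maps
// to slot K % 7 of the root table; if that slot already holds a different key,
// a follow-up table is chained (its pointer stored in Entry.Key, tagged with
// FollowUpTableMarker) and the next level is indexed by (K / 7) % 7, then
// (K / 49) % 7, and so on.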
370 
371 template <typename T> void resetIndCallCounter(T &Entry) {
372   Entry.Val = 0;
373 }
374 
375 template <typename T, uint32_t X, uint32_t Y>
376 void SimpleHashTable<T, X, Y>::resetCounters() {
377   forEachElement(resetIndCallCounter);
378 }
379 
380 /// Represents a hash table mapping a function target address to its counter.
381 using IndirectCallHashTable = SimpleHashTable<>;
382 
383 /// Initialize with number 1 instead of 0 so we don't go into .bss. This is the
384 /// global array of all hash tables storing indirect call destinations observed
385 /// during runtime, one table per call site.
386 IndirectCallHashTable *GlobalIndCallCounters{
387     reinterpret_cast<IndirectCallHashTable *>(1)};
388 
389 /// Don't allow reentrancy in the fdata writing phase - only one thread writes
390 /// it
391 Mutex *GlobalWriteProfileMutex{reinterpret_cast<Mutex *>(1)};
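// Like GlobalIndCallCounters above, this pointer holds a dummy non-zero value
// so it stays out of .bss; it is presumably repointed to a real Mutex during
// initialization (see __bolt_instr_setup, referenced above) before the
// profile-writing phase runs.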
392 
393 /// Store the number of calls in addition to the target address (Key) and the
394 /// frequency as perceived by the basic block counter (Val).
395 struct CallFlowEntryBase : public SimpleHashTableEntryBase {
396   uint64_t Calls;
397 };
398 
399 using CallFlowHashTableBase = SimpleHashTable<CallFlowEntryBase, 11939, 233>;
400 
401 /// This is a large table indexing all possible call targets (indirect and
402 /// direct ones). The goal is to find mismatches between number of calls (for
403 /// those calls we were able to track) and the entry basic block counter of the
404 /// callee. In most cases, these two should be equal. If not, there are two
405 /// possible scenarios here:
406 ///
407 ///  * Entry BB has higher frequency than all known calls to this function.
408 ///    In this case, we have dynamic library code or any uninstrumented code
409 ///    calling this function. We will write the profile for these untracked
410 ///    calls as having source "0 [unknown] 0" in the fdata file.
411 ///
412 ///  * Number of known calls is higher than the frequency of entry BB
413 ///    This only happens when there is no counter for the entry BB / callee
414 ///    function is not simple (in BOLT terms). We don't do anything special
415 ///    here and just ignore those (we still report all calls to the non-simple
416 ///    function, though).
417 ///
418 class CallFlowHashTable : public CallFlowHashTableBase {
419 public:
420   CallFlowHashTable(BumpPtrAllocator &Alloc) : Alloc(Alloc) {}
421 
422   MapEntry &get(uint64_t Key) { return CallFlowHashTableBase::get(Key, Alloc); }
423 
424 private:
425   // Unlike the hash table for indirect call targets, we do store the
426   // allocator here since there is only one call flow hash and space overhead
427   // is negligible.
428   BumpPtrAllocator &Alloc;
429 };
430 
431 ///
432 /// Description metadata emitted by BOLT to describe the program - refer to
433 /// Passes/Instrumentation.cpp - Instrumentation::emitTablesAsELFNote()
434 ///
435 struct Location {
436   uint32_t FunctionName;
437   uint32_t Offset;
438 };
439 
440 struct CallDescription {
441   Location From;
442   uint32_t FromNode;
443   Location To;
444   uint32_t Counter;
445   uint64_t TargetAddress;
446 };
447 
448 using IndCallDescription = Location;
449 
450 struct IndCallTargetDescription {
451   Location Loc;
452   uint64_t Address;
453 };
454 
455 struct EdgeDescription {
456   Location From;
457   uint32_t FromNode;
458   Location To;
459   uint32_t ToNode;
460   uint32_t Counter;
461 };
462 
463 struct InstrumentedNode {
464   uint32_t Node;
465   uint32_t Counter;
466 };
467 
468 struct EntryNode {
469   uint64_t Node;
470   uint64_t Address;
471 };
472 
473 struct FunctionDescription {
474   uint32_t NumLeafNodes;
475   const InstrumentedNode *LeafNodes;
476   uint32_t NumEdges;
477   const EdgeDescription *Edges;
478   uint32_t NumCalls;
479   const CallDescription *Calls;
480   uint32_t NumEntryNodes;
481   const EntryNode *EntryNodes;
482 
483   /// Constructor will parse the serialized function metadata written by BOLT
484   FunctionDescription(const uint8_t *FuncDesc);
485 
486   uint64_t getSize() const {
487     return 16 + NumLeafNodes * sizeof(InstrumentedNode) +
488            NumEdges * sizeof(EdgeDescription) +
489            NumCalls * sizeof(CallDescription) +
490            NumEntryNodes * sizeof(EntryNode);
491   }
492 };
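// Serialized layout consumed by the constructor (defined further below),
// illustrative:
//
//   [uint32_t NumLeafNodes ][InstrumentedNode x NumLeafNodes ]
//   [uint32_t NumEdges     ][EdgeDescription  x NumEdges     ]
//   [uint32_t NumCalls     ][CallDescription  x NumCalls     ]
//   [uint32_t NumEntryNodes][EntryNode        x NumEntryNodes]
//
// which is why getSize() adds 16 bytes for the four counters on top of the
// array sizes.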
493 
494 /// The context is created when the fdata profile needs to be written to disk
495 /// and we need to interpret our runtime counters. It contains pointers to the
496 /// mmaped binary (only the BOLT written metadata section). Deserialization
497 /// should be straightforward as most data is POD or an array of POD elements.
498 /// This metadata is used to reconstruct function CFGs.
499 struct ProfileWriterContext {
500   IndCallDescription *IndCallDescriptions;
501   IndCallTargetDescription *IndCallTargets;
502   uint8_t *FuncDescriptions;
503   char *Strings;  // String table with function names used in this binary
504   int FileDesc;   // File descriptor for the file on disk backing this
505                   // information in memory via mmap
506   void *MMapPtr;  // The mmap ptr
507   int MMapSize;   // The mmap size
508 
509   /// Hash table storing all possible call destinations to detect untracked
510   /// calls and correctly report them as [unknown] in output fdata.
511   CallFlowHashTable *CallFlowTable;
512 
513   /// Lookup the sorted indirect call target vector to fetch function name and
514   /// offset for an arbitrary function pointer.
515   const IndCallTargetDescription *lookupIndCallTarget(uint64_t Target) const;
516 };
517 
518 /// Performs a string comparison and returns zero if Str1 matches Str2. Compares
519 /// at most Size characters.
520 int compareStr(const char *Str1, const char *Str2, int Size) {
521   while (*Str1 == *Str2) {
522     if (*Str1 == '\0' || --Size == 0)
523       return 0;
524     ++Str1;
525     ++Str2;
526   }
527   return 1;
528 }
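// Note that, unlike strncmp, this does not report ordering: it only returns 0
// (match) or 1 (mismatch), which is all the caller below needs.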
529 
530 /// Output Location to the fdata file
531 char *serializeLoc(const ProfileWriterContext &Ctx, char *OutBuf,
532                    const Location Loc, uint32_t BufSize) {
533   // fdata location format: Type Name Offset
534   // Type 1 - regular symbol
535   OutBuf = strCopy(OutBuf, "1 ");
536   const char *Str = Ctx.Strings + Loc.FunctionName;
537   uint32_t Size = 25;
538   while (*Str) {
539     *OutBuf++ = *Str++;
540     if (++Size >= BufSize)
541       break;
542   }
543   assert(!*Str, "buffer overflow, function name too large");
544   *OutBuf++ = ' ';
545   OutBuf = intToStr(OutBuf, Loc.Offset, 16);
546   *OutBuf++ = ' ';
547   return OutBuf;
548 }
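// An edge line assembled from two serializeLoc calls ends up looking like
// (illustrative, hypothetical names and count):
//
//   1 main 0 1 foo 0 0 1588
//
// i.e. "<type> <from name> <from offset> <type> <to name> <to offset>
// <misprediction count> <execution count>", offsets in hex, with the
// misprediction count always written as 0 by this runtime.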
549 
550 /// Read and deserialize a function description written by BOLT. \p FuncDesc
551 /// points at the beginning of the function metadata structure in the file.
552 /// See Instrumentation::emitTablesAsELFNote()
553 FunctionDescription::FunctionDescription(const uint8_t *FuncDesc) {
554   NumLeafNodes = *reinterpret_cast<const uint32_t *>(FuncDesc);
555   DEBUG(reportNumber("NumLeafNodes = ", NumLeafNodes, 10));
556   LeafNodes = reinterpret_cast<const InstrumentedNode *>(FuncDesc + 4);
557 
558   NumEdges = *reinterpret_cast<const uint32_t *>(
559       FuncDesc + 4 + NumLeafNodes * sizeof(InstrumentedNode));
560   DEBUG(reportNumber("NumEdges = ", NumEdges, 10));
561   Edges = reinterpret_cast<const EdgeDescription *>(
562       FuncDesc + 8 + NumLeafNodes * sizeof(InstrumentedNode));
563 
564   NumCalls = *reinterpret_cast<const uint32_t *>(
565       FuncDesc + 8 + NumLeafNodes * sizeof(InstrumentedNode) +
566       NumEdges * sizeof(EdgeDescription));
567   DEBUG(reportNumber("NumCalls = ", NumCalls, 10));
568   Calls = reinterpret_cast<const CallDescription *>(
569       FuncDesc + 12 + NumLeafNodes * sizeof(InstrumentedNode) +
570       NumEdges * sizeof(EdgeDescription));
571   NumEntryNodes = *reinterpret_cast<const uint32_t *>(
572       FuncDesc + 12 + NumLeafNodes * sizeof(InstrumentedNode) +
573       NumEdges * sizeof(EdgeDescription) + NumCalls * sizeof(CallDescription));
574   DEBUG(reportNumber("NumEntryNodes = ", NumEntryNodes, 10));
575   EntryNodes = reinterpret_cast<const EntryNode *>(
576       FuncDesc + 16 + NumLeafNodes * sizeof(InstrumentedNode) +
577       NumEdges * sizeof(EdgeDescription) + NumCalls * sizeof(CallDescription));
578 }
579 
580 /// Read and mmap descriptions written by BOLT from the executable's notes
581 /// section
582 #if defined(HAVE_ELF_H) and !defined(__APPLE__)
583 
584 void *__attribute__((noinline)) __get_pc() {
585   return __builtin_extract_return_addr(__builtin_return_address(0));
586 }
587 
588 /// Parse a string with an address range into hex pair <StartAddress, EndAddress>
589 bool parseAddressRange(const char *Str, uint64_t &StartAddress,
590                        uint64_t &EndAddress) {
591   if (!Str)
592     return false;
593   // Parsed string format: <hex1>-<hex2>
594   StartAddress = hexToLong(Str, '-');
595   while (*Str && *Str != '-')
596     ++Str;
597   if (!*Str)
598     return false;
599   ++Str; // swallow '-'
600   EndAddress = hexToLong(Str);
601   return true;
602 }
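// For example (illustrative), a /proc/self/map_files entry named
// "5632e2bfd000-5632e2c42000" parses into the pair
// <0x5632e2bfd000, 0x5632e2c42000>.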
603 
604 /// Get the full path to the real binary by taking our current virtual address
605 /// and searching for the link whose address range contains it in
606 /// /proc/self/map_files
607 static char *getBinaryPath() {
608   const uint32_t BufSize = 1024;
609   const uint32_t NameMax = 4096;
610   const char DirPath[] = "/proc/self/map_files/";
611   static char TargetPath[NameMax] = {};
612   char Buf[BufSize];
613 
614   if (__bolt_instr_binpath[0] != '\0')
615     return __bolt_instr_binpath;
616 
617   if (TargetPath[0] != '\0')
618     return TargetPath;
619 
620   unsigned long CurAddr = (unsigned long)__get_pc();
621   uint64_t FDdir = __open(DirPath,
622                           /*flags=*/0 /*O_RDONLY*/,
623                           /*mode=*/0666);
624   assert(static_cast<int64_t>(FDdir) >= 0,
625          "failed to open /proc/self/map_files");
626 
627   while (long Nread = __getdents(FDdir, (struct dirent *)Buf, BufSize)) {
628     assert(static_cast<int64_t>(Nread) != -1, "failed to get folder entries");
629 
630     struct dirent *d;
631     for (long Bpos = 0; Bpos < Nread; Bpos += d->d_reclen) {
632       d = (struct dirent *)(Buf + Bpos);
633 
634       uint64_t StartAddress, EndAddress;
635       if (!parseAddressRange(d->d_name, StartAddress, EndAddress))
636         continue;
637       if (CurAddr < StartAddress || CurAddr > EndAddress)
638         continue;
639       char FindBuf[NameMax];
640       char *C = strCopy(FindBuf, DirPath, NameMax);
641       C = strCopy(C, d->d_name, NameMax - (C - FindBuf));
642       *C = '\0';
643       uint32_t Ret = __readlink(FindBuf, TargetPath, sizeof(TargetPath));
644       assert(Ret != -1 && Ret != BufSize, "readlink error");
645       TargetPath[Ret] = '\0';
646       return TargetPath;
647     }
648   }
649   return nullptr;
650 }
651 
652 ProfileWriterContext readDescriptions() {
653   ProfileWriterContext Result;
654   char *BinPath = getBinaryPath();
655   assert(BinPath && BinPath[0] != '\0', "failed to find binary path");
656 
657   uint64_t FD = __open(BinPath,
658                        /*flags=*/0 /*O_RDONLY*/,
659                        /*mode=*/0666);
660   assert(static_cast<int64_t>(FD) >= 0, "failed to open binary path");
661 
662   Result.FileDesc = FD;
663 
664   // mmap our binary to memory
665   uint64_t Size = __lseek(FD, 0, 2 /*SEEK_END*/);
666   uint8_t *BinContents = reinterpret_cast<uint8_t *>(
667       __mmap(0, Size, PROT_READ, MAP_PRIVATE, FD, 0));
668   assert(BinContents != MAP_FAILED, "readDescriptions: Failed to mmap self!");
669   Result.MMapPtr = BinContents;
670   Result.MMapSize = Size;
671   Elf64_Ehdr *Hdr = reinterpret_cast<Elf64_Ehdr *>(BinContents);
672   Elf64_Shdr *Shdr = reinterpret_cast<Elf64_Shdr *>(BinContents + Hdr->e_shoff);
673   Elf64_Shdr *StringTblHeader = reinterpret_cast<Elf64_Shdr *>(
674       BinContents + Hdr->e_shoff + Hdr->e_shstrndx * Hdr->e_shentsize);
675 
676   // Find .bolt.instr.tables with the data we need and set pointers to it
677   for (int I = 0; I < Hdr->e_shnum; ++I) {
678     char *SecName = reinterpret_cast<char *>(
679         BinContents + StringTblHeader->sh_offset + Shdr->sh_name);
680     if (compareStr(SecName, ".bolt.instr.tables", 64) != 0) {
681       Shdr = reinterpret_cast<Elf64_Shdr *>(BinContents + Hdr->e_shoff +
682                                             (I + 1) * Hdr->e_shentsize);
683       continue;
684     }
685     // Actual contents of the ELF note start after offset 20 decimal:
686     // Offset 0: Producer name size (4 bytes)
687     // Offset 4: Contents size (4 bytes)
688     // Offset 8: Note type (4 bytes)
689     // Offset 12: Producer name (BOLT\0) (5 bytes + align to 4-byte boundary)
690     // Offset 20: Contents
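    // Within the contents, BOLT then lays out its tables as (illustrative):
    //   [uint32_t IndCallDescSize      ][IndCallDescriptions blob ]
    //   [uint32_t IndCallTargetDescSize][IndCallTargets blob      ]
    //   [uint32_t FuncDescSize         ][FuncDescriptions blob    ][Strings]
    // which is what the offset arithmetic below (20, 24, 28, 32) walks over.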
691     uint32_t IndCallDescSize =
692         *reinterpret_cast<uint32_t *>(BinContents + Shdr->sh_offset + 20);
693     uint32_t IndCallTargetDescSize = *reinterpret_cast<uint32_t *>(
694         BinContents + Shdr->sh_offset + 24 + IndCallDescSize);
695     uint32_t FuncDescSize =
696         *reinterpret_cast<uint32_t *>(BinContents + Shdr->sh_offset + 28 +
697                                       IndCallDescSize + IndCallTargetDescSize);
698     Result.IndCallDescriptions = reinterpret_cast<IndCallDescription *>(
699         BinContents + Shdr->sh_offset + 24);
700     Result.IndCallTargets = reinterpret_cast<IndCallTargetDescription *>(
701         BinContents + Shdr->sh_offset + 28 + IndCallDescSize);
702     Result.FuncDescriptions = BinContents + Shdr->sh_offset + 32 +
703                               IndCallDescSize + IndCallTargetDescSize;
704     Result.Strings = reinterpret_cast<char *>(
705         BinContents + Shdr->sh_offset + 32 + IndCallDescSize +
706         IndCallTargetDescSize + FuncDescSize);
707     return Result;
708   }
709   const char ErrMsg[] =
710       "BOLT instrumentation runtime error: could not find section "
711       ".bolt.instr.tables\n";
712   reportError(ErrMsg, sizeof(ErrMsg));
713   return Result;
714 }
715 
716 #else
717 
718 ProfileWriterContext readDescriptions() {
719   ProfileWriterContext Result;
720   uint8_t *Tables = _bolt_instr_tables_getter();
721   uint32_t IndCallDescSize = *reinterpret_cast<uint32_t *>(Tables);
722   uint32_t IndCallTargetDescSize =
723       *reinterpret_cast<uint32_t *>(Tables + 4 + IndCallDescSize);
724   uint32_t FuncDescSize = *reinterpret_cast<uint32_t *>(
725       Tables + 8 + IndCallDescSize + IndCallTargetDescSize);
726   Result.IndCallDescriptions =
727       reinterpret_cast<IndCallDescription *>(Tables + 4);
728   Result.IndCallTargets = reinterpret_cast<IndCallTargetDescription *>(
729       Tables + 8 + IndCallDescSize);
730   Result.FuncDescriptions =
731       Tables + 12 + IndCallDescSize + IndCallTargetDescSize;
732   Result.Strings = reinterpret_cast<char *>(
733       Tables + 12 + IndCallDescSize + IndCallTargetDescSize + FuncDescSize);
734   return Result;
735 }
736 
737 #endif
738 
739 #if !defined(__APPLE__)
740 /// Debug helper printing overall metadata global numbers to check they are sane
741 void printStats(const ProfileWriterContext &Ctx) {
742   char StatMsg[BufSize];
743   char *StatPtr = StatMsg;
744   StatPtr =
745       strCopy(StatPtr,
746               "\nBOLT INSTRUMENTATION RUNTIME STATISTICS\n\nIndCallDescSize: ");
747   StatPtr = intToStr(StatPtr,
748                      Ctx.FuncDescriptions -
749                          reinterpret_cast<uint8_t *>(Ctx.IndCallDescriptions),
750                      10);
751   StatPtr = strCopy(StatPtr, "\nFuncDescSize: ");
752   StatPtr = intToStr(
753       StatPtr,
754       reinterpret_cast<uint8_t *>(Ctx.Strings) - Ctx.FuncDescriptions, 10);
755   StatPtr = strCopy(StatPtr, "\n__bolt_instr_num_ind_calls: ");
756   StatPtr = intToStr(StatPtr, __bolt_instr_num_ind_calls, 10);
757   StatPtr = strCopy(StatPtr, "\n__bolt_instr_num_funcs: ");
758   StatPtr = intToStr(StatPtr, __bolt_instr_num_funcs, 10);
759   StatPtr = strCopy(StatPtr, "\n");
760   __write(2, StatMsg, StatPtr - StatMsg);
761 }
762 #endif
763 
764 
765 /// This is part of a simple CFG representation in memory, where we store
766 /// a dynamically sized array of input and output edges per node, and store
767 /// a dynamically sized array of nodes per graph. We also store the spanning
768 /// tree edges for that CFG in a separate array of nodes in
769 /// \p SpanningTreeNodes, while the regular nodes live in \p CFGNodes.
770 struct Edge {
771   uint32_t Node; // Index in the nodes array of this edge's destination
772   uint32_t ID;   // Edge index in an array comprising all edges of the graph
773 };
774 
775 /// A regular graph node or a spanning tree node
776 struct Node {
777   uint32_t NumInEdges{0};  // Input edge count used to size InEdge
778   uint32_t NumOutEdges{0}; // Output edge count used to size OutEdges
779   Edge *InEdges{nullptr};  // Created and managed by \p Graph
780   Edge *OutEdges{nullptr}; // ditto
781 };
782 
783 /// Main class for CFG representation in memory. Manages object creation and
784 /// destruction, populates an array of CFG nodes as well as corresponding
785 /// spanning tree nodes.
786 struct Graph {
787   uint32_t NumNodes;
788   Node *CFGNodes;
789   Node *SpanningTreeNodes;
790   uint64_t *EdgeFreqs;
791   uint64_t *CallFreqs;
792   BumpPtrAllocator &Alloc;
793   const FunctionDescription &D;
794 
795   /// Reads a list of edges from function description \p D and builds
796   /// the graph from it. Allocates several internal dynamic structures that are
797 /// later destroyed by ~Graph() and uses \p Alloc. D.LeafNodes contains all
798 /// spanning tree leaf node descriptions (their counters). They are the seed
799   /// used to compute the rest of the missing edge counts in a bottom-up
800   /// traversal of the spanning tree.
801   Graph(BumpPtrAllocator &Alloc, const FunctionDescription &D,
802         const uint64_t *Counters, ProfileWriterContext &Ctx);
803   ~Graph();
804   void dump() const;
805 
806 private:
807   void computeEdgeFrequencies(const uint64_t *Counters,
808                               ProfileWriterContext &Ctx);
809   void dumpEdgeFreqs() const;
810 };
811 
812 Graph::Graph(BumpPtrAllocator &Alloc, const FunctionDescription &D,
813              const uint64_t *Counters, ProfileWriterContext &Ctx)
814     : Alloc(Alloc), D(D) {
815   DEBUG(reportNumber("G = 0x", (uint64_t)this, 16));
816   // First pass to determine number of nodes
817   int32_t MaxNodes = -1;
818   CallFreqs = nullptr;
819   EdgeFreqs = nullptr;
820   for (int I = 0; I < D.NumEdges; ++I) {
821     if (static_cast<int32_t>(D.Edges[I].FromNode) > MaxNodes)
822       MaxNodes = D.Edges[I].FromNode;
823     if (static_cast<int32_t>(D.Edges[I].ToNode) > MaxNodes)
824       MaxNodes = D.Edges[I].ToNode;
825   }
826 
827   for (int I = 0; I < D.NumLeafNodes; ++I)
828     if (static_cast<int32_t>(D.LeafNodes[I].Node) > MaxNodes)
829       MaxNodes = D.LeafNodes[I].Node;
830 
831   for (int I = 0; I < D.NumCalls; ++I)
832     if (static_cast<int32_t>(D.Calls[I].FromNode) > MaxNodes)
833       MaxNodes = D.Calls[I].FromNode;
834 
835   // No nodes? Nothing to do
836   if (MaxNodes < 0) {
837     DEBUG(report("No nodes!\n"));
838     CFGNodes = nullptr;
839     SpanningTreeNodes = nullptr;
840     NumNodes = 0;
841     return;
842   }
843   ++MaxNodes;
844   DEBUG(reportNumber("NumNodes = ", MaxNodes, 10));
845   NumNodes = static_cast<uint32_t>(MaxNodes);
846 
847   // Initial allocations
848   CFGNodes = new (Alloc) Node[MaxNodes];
849 
850   DEBUG(reportNumber("G->CFGNodes = 0x", (uint64_t)CFGNodes, 16));
851   SpanningTreeNodes = new (Alloc) Node[MaxNodes];
852   DEBUG(reportNumber("G->SpanningTreeNodes = 0x",
853                      (uint64_t)SpanningTreeNodes, 16));
854 
855   // Figure out how much to allocate to each vector (in/out edge sets)
856   for (int I = 0; I < D.NumEdges; ++I) {
857     CFGNodes[D.Edges[I].FromNode].NumOutEdges++;
858     CFGNodes[D.Edges[I].ToNode].NumInEdges++;
859     if (D.Edges[I].Counter != 0xffffffff)
860       continue;
861 
862     SpanningTreeNodes[D.Edges[I].FromNode].NumOutEdges++;
863     SpanningTreeNodes[D.Edges[I].ToNode].NumInEdges++;
864   }
865 
866   // Allocate in/out edge sets
867   for (int I = 0; I < MaxNodes; ++I) {
868     if (CFGNodes[I].NumInEdges > 0)
869       CFGNodes[I].InEdges = new (Alloc) Edge[CFGNodes[I].NumInEdges];
870     if (CFGNodes[I].NumOutEdges > 0)
871       CFGNodes[I].OutEdges = new (Alloc) Edge[CFGNodes[I].NumOutEdges];
872     if (SpanningTreeNodes[I].NumInEdges > 0)
873       SpanningTreeNodes[I].InEdges =
874           new (Alloc) Edge[SpanningTreeNodes[I].NumInEdges];
875     if (SpanningTreeNodes[I].NumOutEdges > 0)
876       SpanningTreeNodes[I].OutEdges =
877           new (Alloc) Edge[SpanningTreeNodes[I].NumOutEdges];
878     CFGNodes[I].NumInEdges = 0;
879     CFGNodes[I].NumOutEdges = 0;
880     SpanningTreeNodes[I].NumInEdges = 0;
881     SpanningTreeNodes[I].NumOutEdges = 0;
882   }
883 
884   // Fill in/out edge sets
885   for (int I = 0; I < D.NumEdges; ++I) {
886     const uint32_t Src = D.Edges[I].FromNode;
887     const uint32_t Dst = D.Edges[I].ToNode;
888     Edge *E = &CFGNodes[Src].OutEdges[CFGNodes[Src].NumOutEdges++];
889     E->Node = Dst;
890     E->ID = I;
891 
892     E = &CFGNodes[Dst].InEdges[CFGNodes[Dst].NumInEdges++];
893     E->Node = Src;
894     E->ID = I;
895 
896     if (D.Edges[I].Counter != 0xffffffff)
897       continue;
898 
899     E = &SpanningTreeNodes[Src]
900              .OutEdges[SpanningTreeNodes[Src].NumOutEdges++];
901     E->Node = Dst;
902     E->ID = I;
903 
904     E = &SpanningTreeNodes[Dst]
905              .InEdges[SpanningTreeNodes[Dst].NumInEdges++];
906     E->Node = Src;
907     E->ID = I;
908   }
909 
910   computeEdgeFrequencies(Counters, Ctx);
911 }
912 
913 Graph::~Graph() {
914   if (CallFreqs)
915     Alloc.deallocate(CallFreqs);
916   if (EdgeFreqs)
917     Alloc.deallocate(EdgeFreqs);
918   for (int I = NumNodes - 1; I >= 0; --I) {
919     if (SpanningTreeNodes[I].OutEdges)
920       Alloc.deallocate(SpanningTreeNodes[I].OutEdges);
921     if (SpanningTreeNodes[I].InEdges)
922       Alloc.deallocate(SpanningTreeNodes[I].InEdges);
923     if (CFGNodes[I].OutEdges)
924       Alloc.deallocate(CFGNodes[I].OutEdges);
925     if (CFGNodes[I].InEdges)
926       Alloc.deallocate(CFGNodes[I].InEdges);
927   }
928   if (SpanningTreeNodes)
929     Alloc.deallocate(SpanningTreeNodes);
930   if (CFGNodes)
931     Alloc.deallocate(CFGNodes);
932 }
933 
934 void Graph::dump() const {
935   reportNumber("Dumping graph with number of nodes: ", NumNodes, 10);
936   report("  Full graph:\n");
937   for (int I = 0; I < NumNodes; ++I) {
938     const Node *N = &CFGNodes[I];
939     reportNumber("    Node #", I, 10);
940     reportNumber("      InEdges total ", N->NumInEdges, 10);
941     for (int J = 0; J < N->NumInEdges; ++J)
942       reportNumber("        ", N->InEdges[J].Node, 10);
943     reportNumber("      OutEdges total ", N->NumOutEdges, 10);
944     for (int J = 0; J < N->NumOutEdges; ++J)
945       reportNumber("        ", N->OutEdges[J].Node, 10);
946     report("\n");
947   }
948   report("  Spanning tree:\n");
949   for (int I = 0; I < NumNodes; ++I) {
950     const Node *N = &SpanningTreeNodes[I];
951     reportNumber("    Node #", I, 10);
952     reportNumber("      InEdges total ", N->NumInEdges, 10);
953     for (int J = 0; J < N->NumInEdges; ++J)
954       reportNumber("        ", N->InEdges[J].Node, 10);
955     reportNumber("      OutEdges total ", N->NumOutEdges, 10);
956     for (int J = 0; J < N->NumOutEdges; ++J)
957       reportNumber("        ", N->OutEdges[J].Node, 10);
958     report("\n");
959   }
960 }
961 
962 void Graph::dumpEdgeFreqs() const {
963   reportNumber(
964       "Dumping edge frequencies for graph with num edges: ", D.NumEdges, 10);
965   for (int I = 0; I < D.NumEdges; ++I) {
966     reportNumber("* Src: ", D.Edges[I].FromNode, 10);
967     reportNumber("  Dst: ", D.Edges[I].ToNode, 10);
968     reportNumber("    Cnt: ", EdgeFreqs[I], 10);
969   }
970 }
971 
972 /// Auxiliary map structure for fast lookups of which calls map to each node of
973 /// the function CFG
974 struct NodeToCallsMap {
975   struct MapEntry {
976     uint32_t NumCalls;
977     uint32_t *Calls;
978   };
979   MapEntry *Entries;
980   BumpPtrAllocator &Alloc;
981   const uint32_t NumNodes;
982 
983   NodeToCallsMap(BumpPtrAllocator &Alloc, const FunctionDescription &D,
984                  uint32_t NumNodes)
985       : Alloc(Alloc), NumNodes(NumNodes) {
986     Entries = new (Alloc, 0) MapEntry[NumNodes];
987     for (int I = 0; I < D.NumCalls; ++I) {
988       DEBUG(reportNumber("Registering call in node ", D.Calls[I].FromNode, 10));
989       ++Entries[D.Calls[I].FromNode].NumCalls;
990     }
991     for (int I = 0; I < NumNodes; ++I) {
992       Entries[I].Calls = Entries[I].NumCalls ? new (Alloc)
993                                                    uint32_t[Entries[I].NumCalls]
994                                              : nullptr;
995       Entries[I].NumCalls = 0;
996     }
997     for (int I = 0; I < D.NumCalls; ++I) {
998       MapEntry &Entry = Entries[D.Calls[I].FromNode];
999       Entry.Calls[Entry.NumCalls++] = I;
1000     }
1001   }
1002 
1003   /// Set the frequency of all calls in node \p NodeID to Freq. However, if
1004   /// the calls have their own counters and do not depend on the basic block
1005 /// counter, it means they have landing pads and may throw exceptions. In this
1006   /// case, set their frequency with their counters and return the maximum
1007   /// value observed in such counters. This will be used as the new frequency
1008   /// at basic block entry. This is used to fix the CFG edge frequencies in the
1009   /// presence of exceptions.
1010   uint64_t visitAllCallsIn(uint32_t NodeID, uint64_t Freq, uint64_t *CallFreqs,
1011                            const FunctionDescription &D,
1012                            const uint64_t *Counters,
1013                            ProfileWriterContext &Ctx) const {
1014     const MapEntry &Entry = Entries[NodeID];
1015     uint64_t MaxValue = 0ull;
1016     for (int I = 0, E = Entry.NumCalls; I != E; ++I) {
1017       const uint32_t CallID = Entry.Calls[I];
1018       DEBUG(reportNumber("  Setting freq for call ID: ", CallID, 10));
1019       const CallDescription &CallDesc = D.Calls[CallID];
1020       if (CallDesc.Counter == 0xffffffff) {
1021         CallFreqs[CallID] = Freq;
1022         DEBUG(reportNumber("  with : ", Freq, 10));
1023       } else {
1024         const uint64_t CounterVal = Counters[CallDesc.Counter];
1025         CallFreqs[CallID] = CounterVal;
1026         MaxValue = CounterVal > MaxValue ? CounterVal : MaxValue;
1027         DEBUG(reportNumber("  with (private counter) : ", CounterVal, 10));
1028       }
1029       DEBUG(reportNumber("  Address: 0x", CallDesc.TargetAddress, 16));
1030       if (CallFreqs[CallID] > 0)
1031         Ctx.CallFlowTable->get(CallDesc.TargetAddress).Calls +=
1032             CallFreqs[CallID];
1033     }
1034     return MaxValue;
1035   }
1036 
1037   ~NodeToCallsMap() {
1038     for (int I = NumNodes - 1; I >= 0; --I)
1039       if (Entries[I].Calls)
1040         Alloc.deallocate(Entries[I].Calls);
1041     Alloc.deallocate(Entries);
1042   }
1043 };
1044 
1045 /// Fill an array with the frequency of each edge in the function represented
1046 /// by G, as well as another array for each call.
1047 void Graph::computeEdgeFrequencies(const uint64_t *Counters,
1048                                    ProfileWriterContext &Ctx) {
1049   if (NumNodes == 0)
1050     return;
1051 
1052   EdgeFreqs = D.NumEdges ? new (Alloc, 0) uint64_t [D.NumEdges] : nullptr;
1053   CallFreqs = D.NumCalls ? new (Alloc, 0) uint64_t [D.NumCalls] : nullptr;
1054 
1055   // Setup a lookup for calls present in each node (BB)
1056   NodeToCallsMap *CallMap = new (Alloc) NodeToCallsMap(Alloc, D, NumNodes);
1057 
1058   // Perform a bottom-up DFS traversal of the spanning tree in G. Edges in the
1059   // spanning tree don't have explicit counters. We must infer their value using
1060   // a linear combination of other counters (sum of counters of the outgoing
1061   // edges minus sum of counters of the incoming edges).
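  // Worked example (illustrative): if a node's frequency is 100 and its only
  // instrumented incoming edge has count 60, the remaining uninstrumented
  // (spanning tree) incoming edge from its parent is inferred later in this
  // function as 100 - 60 = 40.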
1062   uint32_t *Stack = new (Alloc) uint32_t [NumNodes];
1063   uint32_t StackTop = 0;
1064   enum Status : uint8_t { S_NEW = 0, S_VISITING, S_VISITED };
1065   Status *Visited = new (Alloc, 0) Status[NumNodes];
1066   uint64_t *LeafFrequency = new (Alloc, 0) uint64_t[NumNodes];
1067   uint64_t *EntryAddress = new (Alloc, 0) uint64_t[NumNodes];
1068 
1069   // Setup a fast lookup for frequency of leaf nodes, which have special
1070   // basic block frequency instrumentation (they are not edge profiled).
1071   for (int I = 0; I < D.NumLeafNodes; ++I) {
1072     LeafFrequency[D.LeafNodes[I].Node] = Counters[D.LeafNodes[I].Counter];
1073     DEBUG({
1074       if (Counters[D.LeafNodes[I].Counter] > 0) {
1075         reportNumber("Leaf Node# ", D.LeafNodes[I].Node, 10);
1076         reportNumber("     Counter: ", Counters[D.LeafNodes[I].Counter], 10);
1077       }
1078     });
1079   }
1080   for (int I = 0; I < D.NumEntryNodes; ++I) {
1081     EntryAddress[D.EntryNodes[I].Node] = D.EntryNodes[I].Address;
1082     DEBUG({
1083         reportNumber("Entry Node# ", D.EntryNodes[I].Node, 10);
1084         reportNumber("      Address: ", D.EntryNodes[I].Address, 16);
1085     });
1086   }
1087   // Add all root nodes to the stack
1088   for (int I = 0; I < NumNodes; ++I)
1089     if (SpanningTreeNodes[I].NumInEdges == 0)
1090       Stack[StackTop++] = I;
1091 
1092   // Empty stack?
1093   if (StackTop == 0) {
1094     DEBUG(report("Empty stack!\n"));
1095     Alloc.deallocate(EntryAddress);
1096     Alloc.deallocate(LeafFrequency);
1097     Alloc.deallocate(Visited);
1098     Alloc.deallocate(Stack);
1099     CallMap->~NodeToCallsMap();
1100     Alloc.deallocate(CallMap);
1101     if (CallFreqs)
1102       Alloc.deallocate(CallFreqs);
1103     if (EdgeFreqs)
1104       Alloc.deallocate(EdgeFreqs);
1105     EdgeFreqs = nullptr;
1106     CallFreqs = nullptr;
1107     return;
1108   }
1109   // Add all known edge counts, will infer the rest
1110   for (int I = 0; I < D.NumEdges; ++I) {
1111     const uint32_t C = D.Edges[I].Counter;
1112     if (C == 0xffffffff) // inferred counter - we will compute its value
1113       continue;
1114     EdgeFreqs[I] = Counters[C];
1115   }
1116 
1117   while (StackTop > 0) {
1118     const uint32_t Cur = Stack[--StackTop];
1119     DEBUG({
1120       if (Visited[Cur] == S_VISITING)
1121         report("(visiting) ");
1122       else
1123         report("(new) ");
1124       reportNumber("Cur: ", Cur, 10);
1125     });
1126 
1127     // This shouldn't happen in a tree
1128     assert(Visited[Cur] != S_VISITED, "should not have visited nodes in stack");
1129     if (Visited[Cur] == S_NEW) {
1130       Visited[Cur] = S_VISITING;
1131       Stack[StackTop++] = Cur;
1132       assert(StackTop <= NumNodes, "stack grew too large");
1133       for (int I = 0, E = SpanningTreeNodes[Cur].NumOutEdges; I < E; ++I) {
1134         const uint32_t Succ = SpanningTreeNodes[Cur].OutEdges[I].Node;
1135         Stack[StackTop++] = Succ;
1136         assert(StackTop <= NumNodes, "stack grew too large");
1137       }
1138       continue;
1139     }
1140     Visited[Cur] = S_VISITED;
1141 
1142     // Establish our node frequency based on outgoing edges, which should all be
1143     // resolved by now.
1144     int64_t CurNodeFreq = LeafFrequency[Cur];
1145     // Not a leaf?
1146     if (!CurNodeFreq) {
1147       for (int I = 0, E = CFGNodes[Cur].NumOutEdges; I != E; ++I) {
1148         const uint32_t SuccEdge = CFGNodes[Cur].OutEdges[I].ID;
1149         CurNodeFreq += EdgeFreqs[SuccEdge];
1150       }
1151     }
1152     if (CurNodeFreq < 0)
1153       CurNodeFreq = 0;
1154 
1155     const uint64_t CallFreq = CallMap->visitAllCallsIn(
1156         Cur, CurNodeFreq > 0 ? CurNodeFreq : 0, CallFreqs, D, Counters, Ctx);
1157 
1158     // Exception handling affected our output flow? Fix with calls info
1159     DEBUG({
1160       if (CallFreq > CurNodeFreq)
1161         report("Bumping node frequency with call info\n");
1162     });
1163     CurNodeFreq = CallFreq > CurNodeFreq ? CallFreq : CurNodeFreq;
1164 
1165     if (CurNodeFreq > 0) {
1166       if (uint64_t Addr = EntryAddress[Cur]) {
1167         DEBUG(
1168             reportNumber("  Setting flow at entry point address 0x", Addr, 16));
1169         DEBUG(reportNumber("  with: ", CurNodeFreq, 10));
1170         Ctx.CallFlowTable->get(Addr).Val = CurNodeFreq;
1171       }
1172     }
1173 
1174     // No parent? Reached a tree root, limit to call frequency updating.
1175     if (SpanningTreeNodes[Cur].NumInEdges == 0)
1176       continue;
1177 
1178     assert(SpanningTreeNodes[Cur].NumInEdges == 1, "must have 1 parent");
1179     const uint32_t Parent = SpanningTreeNodes[Cur].InEdges[0].Node;
1180     const uint32_t ParentEdge = SpanningTreeNodes[Cur].InEdges[0].ID;
1181 
1182     // Calculate parent edge freq.
1183     int64_t ParentEdgeFreq = CurNodeFreq;
1184     for (int I = 0, E = CFGNodes[Cur].NumInEdges; I != E; ++I) {
1185       const uint32_t PredEdge = CFGNodes[Cur].InEdges[I].ID;
1186       ParentEdgeFreq -= EdgeFreqs[PredEdge];
1187     }
1188 
1189     // Sometimes the conservative CFG that BOLT builds will lead to incorrect
1190     // flow computation. For example, in a BB that transitively calls the exit
1191   // syscall, BOLT will add a fall-through successor even though it should not
1192   // have any successors. So this block's execution count will likely be wrong.
1193   // We tolerate this imperfection since this case should be quite infrequent.
1194     if (ParentEdgeFreq < 0) {
1195       DEBUG(dumpEdgeFreqs());
1196       DEBUG(report("WARNING: incorrect flow"));
1197       ParentEdgeFreq = 0;
1198     }
1199     DEBUG(reportNumber("  Setting freq for ParentEdge: ", ParentEdge, 10));
1200     DEBUG(reportNumber("  with ParentEdgeFreq: ", ParentEdgeFreq, 10));
1201     EdgeFreqs[ParentEdge] = ParentEdgeFreq;
1202   }
1203 
1204   Alloc.deallocate(EntryAddress);
1205   Alloc.deallocate(LeafFrequency);
1206   Alloc.deallocate(Visited);
1207   Alloc.deallocate(Stack);
1208   CallMap->~NodeToCallsMap();
1209   Alloc.deallocate(CallMap);
1210   DEBUG(dumpEdgeFreqs());
1211 }
1212 
1213 /// Write to \p FD all of the edge profiles for function \p FuncDesc. Uses
1214 /// \p Alloc to allocate helper dynamic structures used to compute the profile
1215 /// for edges that we do not explicitly instrument.
1216 const uint8_t *writeFunctionProfile(int FD, ProfileWriterContext &Ctx,
1217                                     const uint8_t *FuncDesc,
1218                                     BumpPtrAllocator &Alloc) {
1219   const FunctionDescription F(FuncDesc);
1220   const uint8_t *next = FuncDesc + F.getSize();
1221 
1222 #if !defined(__APPLE__)
1223   uint64_t *bolt_instr_locations = __bolt_instr_locations;
1224 #else
1225   uint64_t *bolt_instr_locations = _bolt_instr_locations_getter();
1226 #endif
1227 
1228   // Skip funcs we know are cold
1229 #ifndef ENABLE_DEBUG
1230   uint64_t CountersFreq = 0;
1231   for (int I = 0; I < F.NumLeafNodes; ++I)
1232     CountersFreq += bolt_instr_locations[F.LeafNodes[I].Counter];
1233 
1234   if (CountersFreq == 0) {
1235     for (int I = 0; I < F.NumEdges; ++I) {
1236       const uint32_t C = F.Edges[I].Counter;
1237       if (C == 0xffffffff)
1238         continue;
1239       CountersFreq += bolt_instr_locations[C];
1240     }
1241     if (CountersFreq == 0) {
1242       for (int I = 0; I < F.NumCalls; ++I) {
1243         const uint32_t C = F.Calls[I].Counter;
1244         if (C == 0xffffffff)
1245           continue;
1246         CountersFreq += bolt_instr_locations[C];
1247       }
1248       if (CountersFreq == 0)
1249         return next;
1250     }
1251   }
1252 #endif
1253 
1254   Graph *G = new (Alloc) Graph(Alloc, F, bolt_instr_locations, Ctx);
1255   DEBUG(G->dump());
1256 
1257   if (!G->EdgeFreqs && !G->CallFreqs) {
1258     G->~Graph();
1259     Alloc.deallocate(G);
1260     return next;
1261   }
1262 
1263   for (int I = 0; I < F.NumEdges; ++I) {
1264     const uint64_t Freq = G->EdgeFreqs[I];
1265     if (Freq == 0)
1266       continue;
1267     const EdgeDescription *Desc = &F.Edges[I];
1268     char LineBuf[BufSize];
1269     char *Ptr = LineBuf;
1270     Ptr = serializeLoc(Ctx, Ptr, Desc->From, BufSize);
1271     Ptr = serializeLoc(Ctx, Ptr, Desc->To, BufSize - (Ptr - LineBuf));
1272     Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 22);
1273     Ptr = intToStr(Ptr, Freq, 10);
1274     *Ptr++ = '\n';
1275     __write(FD, LineBuf, Ptr - LineBuf);
1276   }
1277 
1278   for (int I = 0; I < F.NumCalls; ++I) {
1279     const uint64_t Freq = G->CallFreqs[I];
1280     if (Freq == 0)
1281       continue;
1282     char LineBuf[BufSize];
1283     char *Ptr = LineBuf;
1284     const CallDescription *Desc = &F.Calls[I];
1285     Ptr = serializeLoc(Ctx, Ptr, Desc->From, BufSize);
1286     Ptr = serializeLoc(Ctx, Ptr, Desc->To, BufSize - (Ptr - LineBuf));
1287     Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
1288     Ptr = intToStr(Ptr, Freq, 10);
1289     *Ptr++ = '\n';
1290     __write(FD, LineBuf, Ptr - LineBuf);
1291   }
1292 
1293   G->~Graph();
1294   Alloc.deallocate(G);
1295   return next;
1296 }
1297 
1298 #if !defined(__APPLE__)
1299 const IndCallTargetDescription *
1300 ProfileWriterContext::lookupIndCallTarget(uint64_t Target) const {
1301   uint32_t B = 0;
1302   uint32_t E = __bolt_instr_num_ind_targets;
1303   if (E == 0)
1304     return nullptr;
1305   do {
1306     uint32_t I = (E - B) / 2 + B;
1307     if (IndCallTargets[I].Address == Target)
1308       return &IndCallTargets[I];
1309     if (IndCallTargets[I].Address < Target)
1310       B = I + 1;
1311     else
1312       E = I;
1313   } while (B < E);
1314   return nullptr;
1315 }
1316 
1317 /// Write a single indirect call <src, target> pair to the fdata file
1318 void visitIndCallCounter(IndirectCallHashTable::MapEntry &Entry,
1319                          int FD, int CallsiteID,
1320                          ProfileWriterContext *Ctx) {
1321   if (Entry.Val == 0)
1322     return;
1323   DEBUG(reportNumber("Target func 0x", Entry.Key, 16));
1324   DEBUG(reportNumber("Target freq: ", Entry.Val, 10));
1325   const IndCallDescription *CallsiteDesc =
1326       &Ctx->IndCallDescriptions[CallsiteID];
1327   const IndCallTargetDescription *TargetDesc =
1328       Ctx->lookupIndCallTarget(Entry.Key);
1329   if (!TargetDesc) {
1330     DEBUG(report("Failed to lookup indirect call target\n"));
1331     char LineBuf[BufSize];
1332     char *Ptr = LineBuf;
1333     Ptr = serializeLoc(*Ctx, Ptr, *CallsiteDesc, BufSize);
1334     Ptr = strCopy(Ptr, "0 [unknown] 0 0 ", BufSize - (Ptr - LineBuf) - 40);
1335     Ptr = intToStr(Ptr, Entry.Val, 10);
1336     *Ptr++ = '\n';
1337     __write(FD, LineBuf, Ptr - LineBuf);
1338     return;
1339   }
1340   Ctx->CallFlowTable->get(TargetDesc->Address).Calls += Entry.Val;
1341   char LineBuf[BufSize];
1342   char *Ptr = LineBuf;
1343   Ptr = serializeLoc(*Ctx, Ptr, *CallsiteDesc, BufSize);
1344   Ptr = serializeLoc(*Ctx, Ptr, TargetDesc->Loc, BufSize - (Ptr - LineBuf));
1345   Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
1346   Ptr = intToStr(Ptr, Entry.Val, 10);
1347   *Ptr++ = '\n';
1348   __write(FD, LineBuf, Ptr - LineBuf);
1349 }
1350 
1351 /// Write to \p FD all of the indirect call profiles.
1352 void writeIndirectCallProfile(int FD, ProfileWriterContext &Ctx) {
1353   for (int I = 0; I < __bolt_instr_num_ind_calls; ++I) {
1354     DEBUG(reportNumber("IndCallsite #", I, 10));
1355     GlobalIndCallCounters[I].forEachElement(visitIndCallCounter, FD, I, &Ctx);
1356   }
1357 }
1358 
1359 /// Check a single call flow for a callee versus all known callers. If there are
1360 /// fewer known calls than the callee's entry count, write the difference with
1361 /// source [unknown] in the profile.
1362 void visitCallFlowEntry(CallFlowHashTable::MapEntry &Entry, int FD,
1363                         ProfileWriterContext *Ctx) {
1364   DEBUG(reportNumber("Call flow entry address: 0x", Entry.Key, 16));
1365   DEBUG(reportNumber("Calls: ", Entry.Calls, 10));
1366   DEBUG(reportNumber("Reported entry frequency: ", Entry.Val, 10));
1367   DEBUG({
1368     if (Entry.Calls > Entry.Val)
1369       report("  More calls than expected!\n");
1370   });
1371   if (Entry.Val <= Entry.Calls)
1372     return;
1373   DEBUG(reportNumber(
1374       "  Balancing calls with traffic: ", Entry.Val - Entry.Calls, 10));
1375   const IndCallTargetDescription *TargetDesc =
1376       Ctx->lookupIndCallTarget(Entry.Key);
1377   if (!TargetDesc) {
1378     // There is probably something wrong with this callee and this should be
1379     // investigated, but I don't want to assert and lose all data collected.
1380     DEBUG(report("WARNING: failed to look up call target!\n"));
1381     return;
1382   }
1383   char LineBuf[BufSize];
1384   char *Ptr = LineBuf;
1385   Ptr = strCopy(Ptr, "0 [unknown] 0 ", BufSize);
1386   Ptr = serializeLoc(*Ctx, Ptr, TargetDesc->Loc, BufSize - (Ptr - LineBuf));
1387   Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
1388   Ptr = intToStr(Ptr, Entry.Val - Entry.Calls, 10);
1389   *Ptr++ = '\n';
1390   __write(FD, LineBuf, Ptr - LineBuf);
1391 }
1392 
1393 /// Open the fdata file for writing and return a valid file descriptor,
1394 /// aborting the program upon failure.
1395 int openProfile() {
1396   // Build the profile name string by appending our PID
1397   char Buf[BufSize];
1398   char *Ptr = Buf;
1399   uint64_t PID = __getpid();
1400   Ptr = strCopy(Buf, __bolt_instr_filename, BufSize);
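  // With PID suffixing enabled, the final name becomes "<filename>.<pid>.fdata".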
1401   if (__bolt_instr_use_pid) {
1402     Ptr = strCopy(Ptr, ".", BufSize - (Ptr - Buf + 1));
1403     Ptr = intToStr(Ptr, PID, 10);
1404     Ptr = strCopy(Ptr, ".fdata", BufSize - (Ptr - Buf + 1));
1405   }
1406   *Ptr++ = '\0';
1407   uint64_t FD = __open(Buf,
1408                        /*flags=*/0x241 /*O_WRONLY|O_TRUNC|O_CREAT*/,
1409                        /*mode=*/0666);
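  // The raw syscall wrapper returns -errno on failure, hence the negation when
  // reporting the error number below.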
1410   if (static_cast<int64_t>(FD) < 0) {
1411     report("Error while trying to open profile file for writing: ");
1412     report(Buf);
1413     reportNumber("\nFailed with error number: 0x",
1414                  0 - static_cast<int64_t>(FD), 16);
1415     __exit(1);
1416   }
1417   return FD;
1418 }
1419 
1420 #endif
1421 
1422 } // anonymous namespace
1423 
1424 #if !defined(__APPLE__)
1425 
1426 /// Reset all counters in case you want to start profiling a new phase of your
1427 /// program independently of prior phases.
1428 /// The address of this function is printed by BOLT, and it can be called by
1429 /// any attached debugger at runtime. A useful one-liner for gdb:
1430 ///
1431 ///   gdb -p $(pgrep -xo PROCESSNAME) -ex 'p ((void(*)())0xdeadbeef)()' \
1432 ///     -ex 'set confirm off' -ex quit
1433 ///
1434 /// Where 0xdeadbeef is this function's address and PROCESSNAME is the name of
1435 /// your binary file.
1436 extern "C" void __bolt_instr_clear_counters() {
1437   memset(reinterpret_cast<char *>(__bolt_instr_locations), 0,
1438          __bolt_num_counters * 8);
1439   for (int I = 0; I < __bolt_instr_num_ind_calls; ++I)
1440     GlobalIndCallCounters[I].resetCounters();
1441 }
1442 
1443 /// This is the entry point for profile writing.
1444 /// There are three ways of getting here:
1445 ///
1446 ///  * Program execution ended, finalization methods are running and BOLT
1447 ///    hooked into FINI from your binary dynamic section;
1448 ///  * You used the sleep timer option and during initialization we forked
1449 ///    a separate process that will call this function periodically;
1450 ///  * BOLT prints this function address so you can attach a debugger and
1451 ///    call this function directly to get your profile written to disk
1452 ///    on demand.
1453 ///
1454 extern "C" void __attribute((force_align_arg_pointer))
1455 __bolt_instr_data_dump() {
1456   // Already dumping
1457   if (!GlobalWriteProfileMutex->acquire())
1458     return;
1459 
1460   BumpPtrAllocator HashAlloc;
1461   HashAlloc.setMaxSize(0x6400000);
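  // 0x6400000 bytes == 100 MiB, the same cap used for the shared allocator in
  // __bolt_instr_setup.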
1462   ProfileWriterContext Ctx = readDescriptions();
1463   Ctx.CallFlowTable = new (HashAlloc, 0) CallFlowHashTable(HashAlloc);
1464 
1465   DEBUG(printStats(Ctx));
1466 
1467   int FD = openProfile();
1468 
1469   BumpPtrAllocator Alloc;
1470   Alloc.setMaxSize(0x6400000);
1471   const uint8_t *FuncDesc = Ctx.FuncDescriptions;
1472   for (int I = 0, E = __bolt_instr_num_funcs; I < E; ++I) {
1473     FuncDesc = writeFunctionProfile(FD, Ctx, FuncDesc, Alloc);
1474     Alloc.clear();
1475     DEBUG(reportNumber("FuncDesc now: ", (uint64_t)FuncDesc, 16));
1476   }
1477   assert(FuncDesc == (void *)Ctx.Strings,
1478          "FuncDesc ptr must be equal to stringtable");
1479 
1480   writeIndirectCallProfile(FD, Ctx);
1481   Ctx.CallFlowTable->forEachElement(visitCallFlowEntry, FD, &Ctx);
1482 
1483   __fsync(FD);
1484   __close(FD);
1485   __munmap(Ctx.MMapPtr, Ctx.MMapSize);
1486   __close(Ctx.FileDesc);
1487   HashAlloc.destroy();
1488   GlobalWriteProfileMutex->release();
1489   DEBUG(report("Finished writing profile.\n"));
1490 }
1491 
1492 /// Event loop for our child process spawned during setup to dump profile data
1493 /// at user-specified intervals
1494 void watchProcess() {
1495   timespec ts, rem;
1496   uint64_t Elapsed = 0ull;
1497   uint64_t ppid;
1498   if (__bolt_instr_wait_forks) {
1499     // Store the negated parent pgid so __kill() below probes the whole group
1500     ppid = -__getpgid(0);
1501     // And leave the parent's process group
1502     __setpgid(0, 0);
1503   } else {
1504     // Store parent pid
1505     ppid = __getppid();
1506     if (ppid == 1) {
1507       // Parent already dead
1508       __bolt_instr_data_dump();
1509       goto out;
1510     }
1511   }
1512 
1513   ts.tv_sec = 1;
1514   ts.tv_nsec = 0;
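  // Poll roughly once per second and dump every __bolt_instr_sleep_time polls.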
1515   while (1) {
1516     __nanosleep(&ts, &rem);
1517     // This means our parent process or all its forks are dead,
1518     // so no need for us to keep dumping.
1519     if (__kill(ppid, 0) < 0) {
1520       if (__bolt_instr_no_counters_clear)
1521         __bolt_instr_data_dump();
1522       break;
1523     }
1524 
1525     if (++Elapsed < __bolt_instr_sleep_time)
1526       continue;
1527 
1528     Elapsed = 0;
1529     __bolt_instr_data_dump();
1530     if (!__bolt_instr_no_counters_clear)
1531       __bolt_instr_clear_counters();
1532   }
1533 
1534 out:;
1535   DEBUG(report("My parent process is dead, bye!\n"));
1536   __exit(0);
1537 }
1538 
1539 extern "C" void __bolt_instr_indirect_call();
1540 extern "C" void __bolt_instr_indirect_tailcall();
1541 
1542 /// Initialization code
1543 extern "C" void __attribute((force_align_arg_pointer)) __bolt_instr_setup() {
1544   const uint64_t CountersStart =
1545       reinterpret_cast<uint64_t>(&__bolt_instr_locations[0]);
1546   const uint64_t CountersEnd = alignTo(
1547       reinterpret_cast<uint64_t>(&__bolt_instr_locations[__bolt_num_counters]),
1548       0x1000);
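  // The end address is page-aligned so the MAP_FIXED remap below covers whole
  // pages.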
1549   DEBUG(reportNumber("replace mmap start: ", CountersStart, 16));
1550   DEBUG(reportNumber("replace mmap stop: ", CountersEnd, 16));
1551   assert(CountersEnd > CountersStart, "no counters");
1552   // Remap our counters as shared instead of private, so forked processes keep
1553   // counting into the same memory
1554   void *Ret =
1555       __mmap(CountersStart, CountersEnd - CountersStart, PROT_READ | PROT_WRITE,
1556              MAP_ANONYMOUS | MAP_SHARED | MAP_FIXED, -1, 0);
1557   assert(Ret != MAP_FAILED, "__bolt_instr_setup: Failed to mmap counters!");
1558   __bolt_ind_call_counter_func_pointer = __bolt_instr_indirect_call;
1559   __bolt_ind_tailcall_counter_func_pointer = __bolt_instr_indirect_tailcall;
1560   // Conservatively reserve 100 MiB of shared pages
1561   GlobalAlloc.setMaxSize(0x6400000);
1562   GlobalAlloc.setShared(true);
1563   GlobalWriteProfileMutex = new (GlobalAlloc, 0) Mutex();
1564   if (__bolt_instr_num_ind_calls > 0)
1565     GlobalIndCallCounters =
1566         new (GlobalAlloc, 0) IndirectCallHashTable[__bolt_instr_num_ind_calls];
1567 
1568   if (__bolt_instr_sleep_time != 0) {
1569     // Move the instrumented process into its own process group
1570     if (__bolt_instr_wait_forks)
1571       __setpgid(0, 0);
1572 
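    // The parent returns and resumes normal execution; the child becomes the
    // watcher process that periodically dumps the profile.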
1573     if (long PID = __fork())
1574       return;
1575     watchProcess();
1576   }
1577 }
1578 
1579 extern "C" __attribute((force_align_arg_pointer)) void
1580 instrumentIndirectCall(uint64_t Target, uint64_t IndCallID) {
1581   GlobalIndCallCounters[IndCallID].incrementVal(Target, GlobalAlloc);
1582 }
1583 
1584 /// We receive as in-stack arguments the identifier of the indirect call site
1585 /// as well as the target address for the call
1586 extern "C" __attribute((naked)) void __bolt_instr_indirect_call()
1587 {
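  // The two in-stack arguments (call target and callsite ID) sit above the
  // register-save area pushed by SAVE_ALL; the fixed offsets below are assumed
  // to account for that frame (see SAVE_ALL in common.h).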
1588   __asm__ __volatile__(SAVE_ALL
1589                        "mov 0xa0(%%rsp), %%rdi\n"
1590                        "mov 0x98(%%rsp), %%rsi\n"
1591                        "call instrumentIndirectCall\n"
1592                        RESTORE_ALL
1593                        "ret\n"
1594                        :::);
1595 }
1596 
1597 extern "C" __attribute((naked)) void __bolt_instr_indirect_tailcall()
1598 {
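  // Same scheme as __bolt_instr_indirect_call, with the argument offsets
  // shifted by 8 bytes for the tail-call variant.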
1599   __asm__ __volatile__(SAVE_ALL
1600                        "mov 0x98(%%rsp), %%rdi\n"
1601                        "mov 0x90(%%rsp), %%rsi\n"
1602                        "call instrumentIndirectCall\n"
1603                        RESTORE_ALL
1604                        "ret\n"
1605                        :::);
1606 }
1607 
1608 /// This hooks the ELF entry point; it needs to save all machine state.
1609 extern "C" __attribute((naked)) void __bolt_instr_start()
1610 {
1611   __asm__ __volatile__(SAVE_ALL
1612                        "call __bolt_instr_setup\n"
1613                        RESTORE_ALL
1614                        "jmp __bolt_start_trampoline\n"
1615                        :::);
1616 }
1617 
1618 /// This hooks into ELF's DT_FINI.
1619 extern "C" void __bolt_instr_fini() {
1620   __bolt_fini_trampoline();
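  // When the sleep timer is in use, the forked watcher process handles
  // dumping, so only dump here if it is disabled.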
1621   if (__bolt_instr_sleep_time == 0)
1622     __bolt_instr_data_dump();
1623   DEBUG(report("Finished.\n"));
1624 }
1625 
1626 #endif
1627 
1628 #if defined(__APPLE__)
1629 
1630 extern "C" void __bolt_instr_data_dump() {
1631   ProfileWriterContext Ctx = readDescriptions();
1632 
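  // On Darwin the profile is simply written to stderr (fd 2) rather than to an
  // fdata file.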
1633   int FD = 2;
1634   BumpPtrAllocator Alloc;
1635   const uint8_t *FuncDesc = Ctx.FuncDescriptions;
1636   uint32_t NumFuncs = _bolt_instr_num_funcs_getter();
1637 
1638   for (int I = 0, E = NumFuncs; I < E; ++I) {
1639     FuncDesc = writeFunctionProfile(FD, Ctx, FuncDesc, Alloc);
1640     Alloc.clear();
1641     DEBUG(reportNumber("FuncDesc now: ", (uint64_t)FuncDesc, 16));
1642   }
1643   assert(FuncDesc == (void *)Ctx.Strings,
1644          "FuncDesc ptr must be equal to stringtable");
1645 }
1646 
1647 // On OSX/iOS the final symbol name of an extern "C" function/variable contains
1648 // one extra leading underscore: _bolt_instr_setup -> __bolt_instr_setup.
1649 extern "C"
1650 __attribute__((section("__TEXT,__setup")))
1651 __attribute__((force_align_arg_pointer))
1652 void _bolt_instr_setup() {
1653   __asm__ __volatile__(SAVE_ALL :::);
1654 
1655   report("Hello!\n");
1656 
1657   __asm__ __volatile__(RESTORE_ALL :::);
1658 }
1659 
1660 extern "C"
1661 __attribute__((section("__TEXT,__fini")))
1662 __attribute__((force_align_arg_pointer))
1663 void _bolt_instr_fini() {
1664   report("Bye!\n");
1665   __bolt_instr_data_dump();
1666 }
1667 
1668 #endif
1669 #endif
1670