xref: /llvm-project/bolt/runtime/instr.cpp (revision 3e13b299f9a30c566b8ff9e62edaf3dbf2a6b7e8)
1 //===- bolt/runtime/instr.cpp ---------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // BOLT runtime instrumentation library for x86 Linux. Currently, BOLT does
10 // not support linking modules with dependencies on one another into the final
11 // binary (TODO?), which means this library has to be self-contained in a single
12 // module.
13 //
14 // All extern declarations here need to be defined by BOLT itself. Those will be
15 // undefined symbols that BOLT needs to resolve by emitting these symbols with
16 // MCStreamer. Currently, Passes/Instrumentation.cpp is the pass responsible
17 // for defining the symbols here, and these two files are tightly coupled: one
18 // works statically when you run BOLT and the other at program runtime when you
19 // run an instrumented binary. The main goal here is to output an fdata file
20 // (BOLT profile) with the instrumentation counters inserted by the static pass.
21 // Counters for indirect calls are an exception, as we can't know them
22 // statically. These counters are created and managed here. To allow this, we
23 // need a minimal framework for allocating memory dynamically. We provide this
24 // with the BumpPtrAllocator class (not LLVM's, but our own version of it).
25 //
26 // Since this code is intended to be inserted into any executable, we decided to
27 // make it standalone and not depend on any external libraries (i.e. language
28 // support libraries, such as glibc or stdc++). To allow this, we provide a few
29 // light implementations of common OS-interacting functionality using direct
30 // syscall wrappers. Our simple allocator doesn't manage deallocations that
31 // fragment the memory space, so it's stack based. This is the minimal framework
32 // provided here to allow processing instrumented counters and writing fdata.
33 //
34 // In the C++ idiom used here, we never use or rely on constructors or
35 // destructors for global objects. That's because those need support from the
36 // linker in initialization/finalization code, and we want to keep our linker
37 // very simple. Similarly, we don't create any global objects that are zero
38 // initialized, since those would need to go to .bss, which our simple linker
39 // also doesn't support (TODO?).
40 //
41 //===----------------------------------------------------------------------===//
42 
43 #if defined (__x86_64__)
44 #include "common.h"
45 
46 // Enables very verbose logging to stderr, useful when debugging
47 //#define ENABLE_DEBUG
48 
49 #ifdef ENABLE_DEBUG
50 #define DEBUG(X)                                                               \
51   { X; }
52 #else
53 #define DEBUG(X)                                                               \
54   {}
55 #endif
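// Example (illustrative only): a typical use of the DEBUG macro with the
// reportNumber() helper used throughout this file. With ENABLE_DEBUG defined,
//
//   DEBUG(reportNumber("allocated bytes: ", Size, 10));
//
// expands to `{ reportNumber("allocated bytes: ", Size, 10); }`; without it,
// the statement compiles away to an empty block.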
56 
57 #pragma GCC visibility push(hidden)
58 
59 extern "C" {
60 
61 #if defined(__APPLE__)
62 extern uint64_t* _bolt_instr_locations_getter();
63 extern uint32_t _bolt_num_counters_getter();
64 
65 extern uint8_t* _bolt_instr_tables_getter();
66 extern uint32_t _bolt_instr_num_funcs_getter();
67 
68 #else
69 
70 // Main counters inserted by instrumentation, incremented at runtime when
71 // points of interest (locations) in the program are reached. These are direct
72 // calls and direct and indirect branches (local ones). There are also counters
73 // for basic block execution, used when a block is a spanning tree leaf whose
74 // count is needed to infer the execution counts of other CFG edges.
75 extern uint64_t __bolt_instr_locations[];
76 extern uint32_t __bolt_num_counters;
77 // Descriptions are serialized metadata about binary functions written by BOLT,
78 // so we have a minimal understanding of the program structure. For a
79 // reference on the exact format of this metadata, see the *Description structs,
80 // Location, InstrumentedNode and EntryNode.
81 // Number of indirect call site descriptions
82 extern uint32_t __bolt_instr_num_ind_calls;
83 // Number of indirect call target descriptions
84 extern uint32_t __bolt_instr_num_ind_targets;
85 // Number of function descriptions
86 extern uint32_t __bolt_instr_num_funcs;
87 // Time to sleep across dumps (when we write the fdata profile to disk)
88 extern uint32_t __bolt_instr_sleep_time;
89 // Do not clear counters across dumps, rewrite file with the updated values
90 extern bool __bolt_instr_no_counters_clear;
91 // Wait until all forks of the instrumented process finish
92 extern bool __bolt_instr_wait_forks;
93 // Filename to dump data to
94 extern char __bolt_instr_filename[];
95 // Instrumented binary file path
96 extern char __bolt_instr_binpath[];
97 // If true, append current PID to the fdata filename when creating it so
98 // different invocations of the same program can be differentiated.
99 extern bool __bolt_instr_use_pid;
100 // Functions that will be used to instrument indirect calls. The BOLT static
101 // pass will identify indirect calls and modify them to load the address stored
102 // in these trampolines and call that address instead. BOLT can't use direct
103 // calls to our handlers because our addresses here are not known at analysis
104 // time. We only support resolving dependencies from this file to the output of
105 // BOLT, *not* the other way around.
106 // TODO: We need better linking support to make that happen.
107 extern void (*__bolt_ind_call_counter_func_pointer)();
108 extern void (*__bolt_ind_tailcall_counter_func_pointer)();
109 // Function pointers to init/fini trampoline routines in the binary, so we can
110 // resume regular execution of these functions that we hooked
111 extern void __bolt_start_trampoline();
112 extern void __bolt_fini_trampoline();
113 
114 #endif
115 }
116 
117 namespace {
118 
119 /// A simple allocator that mmaps a fixed size region and manages this space
120 /// in a stack fashion, meaning you always deallocate the last element that
121 /// was allocated. In practice, we don't need to deallocate individual elements.
122 /// We monotonically increase our usage and then deallocate everything once we
123 /// are done processing something.
124 class BumpPtrAllocator {
125   /// This is written before each allocation and acts as a canary to detect
126   /// when a bug causes our program to cross allocation boundaries.
127   struct EntryMetadata {
128     uint64_t Magic;
129     uint64_t AllocSize;
130   };
131 
132 public:
133   void *allocate(size_t Size) {
134     Lock L(M);
135 
136     if (StackBase == nullptr) {
137 #if defined(__APPLE__)
138     int MAP_PRIVATE_MAP_ANONYMOUS = 0x1002;
139 #else
140     int MAP_PRIVATE_MAP_ANONYMOUS = 0x22;
141 #endif
142       StackBase = reinterpret_cast<uint8_t *>(
143           __mmap(0, MaxSize, 0x3 /* PROT_READ | PROT_WRITE*/,
144                  Shared ? 0x21 /*MAP_SHARED | MAP_ANONYMOUS*/
145                         : MAP_PRIVATE_MAP_ANONYMOUS /* MAP_PRIVATE | MAP_ANONYMOUS*/,
146                  -1, 0));
147       StackSize = 0;
148     }
149 
150     Size = alignTo(Size + sizeof(EntryMetadata), 16);
151     uint8_t *AllocAddress = StackBase + StackSize + sizeof(EntryMetadata);
152     auto *M = reinterpret_cast<EntryMetadata *>(StackBase + StackSize);
153     M->Magic = Magic;
154     M->AllocSize = Size;
155     StackSize += Size;
156     assert(StackSize < MaxSize, "allocator ran out of memory");
157     return AllocAddress;
158   }
159 
160 #ifdef DEBUG
161   /// Element-wise deallocation is only used for debugging to catch memory
162   /// bugs by checking magic bytes. Ordinarily, we reset the allocator once
163   /// we are done with it. Reset is done with clear(). There's no need
164   /// to deallocate each element individually.
165   void deallocate(void *Ptr) {
166     Lock L(M);
167     uint8_t MetadataOffset = sizeof(EntryMetadata);
168     auto *M = reinterpret_cast<EntryMetadata *>(
169         reinterpret_cast<uint8_t *>(Ptr) - MetadataOffset);
170     const uint8_t *StackTop = StackBase + StackSize + MetadataOffset;
171     // Validate size
172     if (Ptr != StackTop - M->AllocSize) {
173       // Failed validation, check if it is a pointer returned by operator new []
174       MetadataOffset +=
175           sizeof(uint64_t); // Space for number of elements alloc'ed
176       M = reinterpret_cast<EntryMetadata *>(reinterpret_cast<uint8_t *>(Ptr) -
177                                             MetadataOffset);
178       // Ok, it failed both checks if this assertion fails. Stop the program, we
179       // have a memory bug.
180       assert(Ptr == StackTop - M->AllocSize,
181              "must deallocate the last element alloc'ed");
182     }
183     assert(M->Magic == Magic, "allocator magic is corrupt");
184     StackSize -= M->AllocSize;
185   }
186 #else
187   void deallocate(void *) {}
188 #endif
189 
190   void clear() {
191     Lock L(M);
192     StackSize = 0;
193   }
194 
195   /// Set mmap reservation size (only relevant before first allocation)
196   void setMaxSize(uint64_t Size) { MaxSize = Size; }
197 
198   /// Set mmap reservation privacy (only relevant before first allocation)
199   void setShared(bool S) { Shared = S; }
200 
201   void destroy() {
202     if (StackBase == nullptr)
203       return;
204     __munmap(StackBase, MaxSize);
205   }
206 
207 private:
208   static constexpr uint64_t Magic = 0x1122334455667788ull;
209   uint64_t MaxSize = 0xa00000;
210   uint8_t *StackBase{nullptr};
211   uint64_t StackSize{0};
212   bool Shared{false};
213   Mutex M;
214 };
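// Rough sketch of the allocation layout (illustrative, not normative): each
// allocate(Size) call reserves alignTo(Size + sizeof(EntryMetadata), 16) bytes
// from the mmapped region and returns the address just past the metadata:
//
//   StackBase + StackSize -> | EntryMetadata{Magic, AllocSize} | payload ... |
//                                                              ^
//                                                              returned pointer
//
// The element-wise deallocate() above uses this metadata to check that frees
// happen in strict LIFO order; normal usage just calls clear() or destroy().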
215 
216 /// Used for allocating indirect call instrumentation counters. Initialized by
217 /// __bolt_instr_setup, our initialization routine.
218 BumpPtrAllocator GlobalAlloc;
219 } // anonymous namespace
220 
221 // User-defined placement new operators. We only use those (as opposed to
222 // overriding the regular operator new) so we can keep our allocator in the
223 // stack instead of in a data section (global).
224 void *operator new(size_t Sz, BumpPtrAllocator &A) { return A.allocate(Sz); }
225 void *operator new(size_t Sz, BumpPtrAllocator &A, char C) {
226   auto *Ptr = reinterpret_cast<char *>(A.allocate(Sz));
227   memset(Ptr, C, Sz);
228   return Ptr;
229 }
230 void *operator new[](size_t Sz, BumpPtrAllocator &A) {
231   return A.allocate(Sz);
232 }
233 void *operator new[](size_t Sz, BumpPtrAllocator &A, char C) {
234   auto *Ptr = reinterpret_cast<char *>(A.allocate(Sz));
235   memset(Ptr, C, Sz);
236   return Ptr;
237 }
238 // Only called during exception unwinding (effectively unused). We must
239 // deallocate manually. C++ language weirdness.
240 void operator delete(void *Ptr, BumpPtrAllocator &A) { A.deallocate(Ptr); }
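// Example usage (illustrative): this is how the rest of this file obtains
// memory from a BumpPtrAllocator instance:
//
//   uint64_t *Freqs = new (Alloc, 0) uint64_t[NumEdges]; // bytes memset to 0
//   Node *CFGNodes  = new (Alloc) Node[NumNodes];        // default-constructed
//
// Cleanup is normally wholesale, via Alloc.clear() or Alloc.destroy(), rather
// than per-object operator delete.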
241 
242 namespace {
243 
244 // Disable instrumentation optimizations that sacrifice profile accuracy
245 extern "C" bool __bolt_instr_conservative;
246 
247 /// Basic key-val atom stored in our hash
248 struct SimpleHashTableEntryBase {
249   uint64_t Key;
250   uint64_t Val;
251   void dump(const char *Msg = nullptr) {
252     // TODO: make some sort of formatting function
253     // Currently we have to do it the ugly way because
254     // we want every message to be printed atomically via a single call to
255     // __write. If we use reportNumber() and others multiple times, we'll get
256     // garbage in a multithreaded environment.
257     char Buf[BufSize];
258     char *Ptr = Buf;
259     Ptr = intToStr(Ptr, __getpid(), 10);
260     *Ptr++ = ':';
261     *Ptr++ = ' ';
262     if (Msg)
263       Ptr = strCopy(Ptr, Msg, strLen(Msg));
264     *Ptr++ = '0';
265     *Ptr++ = 'x';
266     Ptr = intToStr(Ptr, (uint64_t)this, 16);
267     *Ptr++ = ':';
268     *Ptr++ = ' ';
269     Ptr = strCopy(Ptr, "MapEntry(0x", sizeof("MapEntry(0x") - 1);
270     Ptr = intToStr(Ptr, Key, 16);
271     *Ptr++ = ',';
272     *Ptr++ = ' ';
273     *Ptr++ = '0';
274     *Ptr++ = 'x';
275     Ptr = intToStr(Ptr, Val, 16);
276     *Ptr++ = ')';
277     *Ptr++ = '\n';
278     assert(Ptr - Buf < BufSize, "Buffer overflow!");
279     // print everything all at once for atomicity
280     __write(2, Buf, Ptr - Buf);
281   }
282 };
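// For reference, a dump() line has this shape (values made up):
//
//   1234: Created root entry: 0x7ffc89abcdef: MapEntry(0x401000, 0x2a)
//
// i.e. PID, the optional message, this entry's own address, then (Key, Val),
// all emitted through a single __write() call so the line stays atomic.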
283 
284 /// This hash table implementation starts by allocating a table of size
285 /// InitialSize. When conflicts happen in this main table, it resolves
286 /// them by chaining a new table of size IncSize. It never reallocs as our
287 /// allocator doesn't support it. The key is intended to be function pointers.
288 /// There's no clever hash function (it's just x mod size, size being prime).
289 /// The coefficients in the modular equation were never tuned (TODO).
290 /// This is used for indirect calls (each call site has one of these, so it
291 /// should have a small footprint) and for tallying call counts globally for
292 /// each target to check if we missed the origin of some calls (this one is a
293 /// large instantiation of this template, since it is global for all call sites).
294 template <typename T = SimpleHashTableEntryBase, uint32_t InitialSize = 7,
295           uint32_t IncSize = 7>
296 class SimpleHashTable {
297 public:
298   using MapEntry = T;
299 
300   /// Increment by 1 the value of \p Key. If it is not in this table, it will be
301   /// added to the table and its value set to 1.
302   void incrementVal(uint64_t Key, BumpPtrAllocator &Alloc) {
303     ++get(Key, Alloc).Val;
304   }
305 
306   /// Basic member accessing interface. Here we pass the allocator explicitly to
307   /// avoid storing a pointer to it as part of this table (remember there is one
308   /// hash for each indirect call site, so we want to minimize our footprint).
309   MapEntry &get(uint64_t Key, BumpPtrAllocator &Alloc) {
310     if (!__bolt_instr_conservative) {
311       TryLock L(M);
312       if (!L.isLocked())
313         return NoEntry;
314       return getOrAllocEntry(Key, Alloc);
315     }
316     Lock L(M);
317     return getOrAllocEntry(Key, Alloc);
318   }
319 
320   /// Traverses all elements in the table
321   template <typename... Args>
322   void forEachElement(void (*Callback)(MapEntry &, Args...), Args... args) {
323     Lock L(M);
324     if (!TableRoot)
325       return;
326     return forEachElement(Callback, InitialSize, TableRoot, args...);
327   }
328 
329   void resetCounters();
330 
331 private:
332   constexpr static uint64_t VacantMarker = 0;
333   constexpr static uint64_t FollowUpTableMarker = 0x8000000000000000ull;
334 
335   MapEntry *TableRoot{nullptr};
336   MapEntry NoEntry;
337   Mutex M;
338 
339   template <typename... Args>
340   void forEachElement(void (*Callback)(MapEntry &, Args...),
341                       uint32_t NumEntries, MapEntry *Entries, Args... args) {
342     for (uint32_t I = 0; I < NumEntries; ++I) {
343       MapEntry &Entry = Entries[I];
344       if (Entry.Key == VacantMarker)
345         continue;
346       if (Entry.Key & FollowUpTableMarker) {
347         forEachElement(Callback, IncSize,
348                        reinterpret_cast<MapEntry *>(Entry.Key &
349                                                     ~FollowUpTableMarker),
350                        args...);
351         continue;
352       }
353       Callback(Entry, args...);
354     }
355   }
356 
357   MapEntry &firstAllocation(uint64_t Key, BumpPtrAllocator &Alloc) {
358     TableRoot = new (Alloc, 0) MapEntry[InitialSize];
359     MapEntry &Entry = TableRoot[Key % InitialSize];
360     Entry.Key = Key;
361     // DEBUG(Entry.dump("Created root entry: "));
362     return Entry;
363   }
364 
365   MapEntry &getEntry(MapEntry *Entries, uint64_t Key, uint64_t Selector,
366                      BumpPtrAllocator &Alloc, int CurLevel) {
367     // DEBUG(reportNumber("getEntry called, level ", CurLevel, 10));
368     const uint32_t NumEntries = CurLevel == 0 ? InitialSize : IncSize;
369     uint64_t Remainder = Selector / NumEntries;
370     Selector = Selector % NumEntries;
371     MapEntry &Entry = Entries[Selector];
372 
373     // A hit
374     if (Entry.Key == Key) {
375       // DEBUG(Entry.dump("Hit: "));
376       return Entry;
377     }
378 
379     // Vacant - add new entry
380     if (Entry.Key == VacantMarker) {
381       Entry.Key = Key;
382       // DEBUG(Entry.dump("Adding new entry: "));
383       return Entry;
384     }
385 
386     // Defer to the next level
387     if (Entry.Key & FollowUpTableMarker) {
388       return getEntry(
389           reinterpret_cast<MapEntry *>(Entry.Key & ~FollowUpTableMarker),
390           Key, Remainder, Alloc, CurLevel + 1);
391     }
392 
393     // Conflict - create the next level
394     // DEBUG(Entry.dump("Creating new level: "));
395 
396     MapEntry *NextLevelTbl = new (Alloc, 0) MapEntry[IncSize];
397     // DEBUG(
398     //     reportNumber("Newly allocated level: 0x", uint64_t(NextLevelTbl),
399     //     16));
400     uint64_t CurEntrySelector = Entry.Key / InitialSize;
401     for (int I = 0; I < CurLevel; ++I)
402       CurEntrySelector /= IncSize;
403     CurEntrySelector = CurEntrySelector % IncSize;
404     NextLevelTbl[CurEntrySelector] = Entry;
405     Entry.Key = reinterpret_cast<uint64_t>(NextLevelTbl) | FollowUpTableMarker;
406     assert((NextLevelTbl[CurEntrySelector].Key & ~FollowUpTableMarker) !=
407                uint64_t(Entries),
408            "circular reference created!\n");
409     // DEBUG(NextLevelTbl[CurEntrySelector].dump("New level entry: "));
410     // DEBUG(Entry.dump("Updated old entry: "));
411     return getEntry(NextLevelTbl, Key, Remainder, Alloc, CurLevel + 1);
412   }
413 
414   MapEntry &getOrAllocEntry(uint64_t Key, BumpPtrAllocator &Alloc) {
415     if (TableRoot)
416       return getEntry(TableRoot, Key, Key, Alloc, 0);
417     return firstAllocation(Key, Alloc);
418   }
419 };
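// Worked lookup example (illustrative, with the default InitialSize and
// IncSize of 7): get(100) starts at slot 100 % 7 == 2 of TableRoot. If that
// slot already holds a different key, a 7-entry follow-up table is chained
// there, the previous occupant is rehashed into it, and the search continues
// with the next selector digit, (100 / 7) % 7 == 0. A chained slot is
// recognized by FollowUpTableMarker (the top bit of Key), which is masked off
// to recover the pointer to the next-level table.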
420 
421 template <typename T> void resetIndCallCounter(T &Entry) {
422   Entry.Val = 0;
423 }
424 
425 template <typename T, uint32_t X, uint32_t Y>
426 void SimpleHashTable<T, X, Y>::resetCounters() {
427   forEachElement(resetIndCallCounter);
428 }
429 
430 /// Represents a hash table mapping a function target address to its counter.
431 using IndirectCallHashTable = SimpleHashTable<>;
432 
433 /// Initialize with the number 1 instead of 0 so we don't go into .bss. This is
434 /// the global array of all hash tables storing indirect call destinations
435 /// observed during runtime, one table per call site.
436 IndirectCallHashTable *GlobalIndCallCounters{
437     reinterpret_cast<IndirectCallHashTable *>(1)};
438 
439 /// Don't allow reentrancy in the fdata writing phase - only one thread writes
440 /// it
441 Mutex *GlobalWriteProfileMutex{reinterpret_cast<Mutex *>(1)};
442 
443 /// Store the number of calls in addition to the target address (Key) and the
444 /// frequency as perceived by the basic block counter (Val).
445 struct CallFlowEntryBase : public SimpleHashTableEntryBase {
446   uint64_t Calls;
447 };
448 
449 using CallFlowHashTableBase = SimpleHashTable<CallFlowEntryBase, 11939, 233>;
450 
451 /// This is a large table indexing all possible call targets (indirect and
452 /// direct ones). The goal is to find mismatches between number of calls (for
453 /// those calls we were able to track) and the entry basic block counter of the
454 /// callee. In most cases, these two should be equal. If not, there are two
455 /// possible scenarios here:
456 ///
457 ///  * Entry BB has higher frequency than all known calls to this function.
458 ///    In this case, we have dynamic library code or any uninstrumented code
459 ///    calling this function. We will write the profile for these untracked
460 ///    calls as having source "0 [unknown] 0" in the fdata file.
461 ///
462 ///  * Number of known calls is higher than the frequency of the entry BB.
463 ///    This only happens when there is no counter for the entry BB because the
464 ///    callee function is not simple (in BOLT terms). We don't do anything
465 ///    special here and just ignore those (we still report all calls to the
466 ///    non-simple function, though).
467 ///
468 class CallFlowHashTable : public CallFlowHashTableBase {
469 public:
470   CallFlowHashTable(BumpPtrAllocator &Alloc) : Alloc(Alloc) {}
471 
472   MapEntry &get(uint64_t Key) { return CallFlowHashTableBase::get(Key, Alloc); }
473 
474 private:
475   // Unlike the hash table for indirect call targets, we do store the
476   // allocator here, since there is only one call flow hash and the space
477   // overhead is negligible.
478   BumpPtrAllocator &Alloc;
479 };
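// Illustrative example of the balancing this table enables: if a callee's
// entry basic block reports a frequency of 150 but all tracked calls to it
// sum to only 100, the writer emits an extra fdata line attributing the
// missing 50 calls to an unknown source, e.g. (names and numbers made up):
//
//   0 [unknown] 0 1 foo 0 0 50
//
// See visitCallFlowEntry() below for the code that emits these lines.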
480 
481 ///
482 /// Description metadata emitted by BOLT to describe the program - refer to
483 /// Passes/Instrumentation.cpp - Instrumentation::emitTablesAsELFNote()
484 ///
485 struct Location {
486   uint32_t FunctionName;
487   uint32_t Offset;
488 };
489 
490 struct CallDescription {
491   Location From;
492   uint32_t FromNode;
493   Location To;
494   uint32_t Counter;
495   uint64_t TargetAddress;
496 };
497 
498 using IndCallDescription = Location;
499 
500 struct IndCallTargetDescription {
501   Location Loc;
502   uint64_t Address;
503 };
504 
505 struct EdgeDescription {
506   Location From;
507   uint32_t FromNode;
508   Location To;
509   uint32_t ToNode;
510   uint32_t Counter;
511 };
512 
513 struct InstrumentedNode {
514   uint32_t Node;
515   uint32_t Counter;
516 };
517 
518 struct EntryNode {
519   uint64_t Node;
520   uint64_t Address;
521 };
522 
523 struct FunctionDescription {
524   uint32_t NumLeafNodes;
525   const InstrumentedNode *LeafNodes;
526   uint32_t NumEdges;
527   const EdgeDescription *Edges;
528   uint32_t NumCalls;
529   const CallDescription *Calls;
530   uint32_t NumEntryNodes;
531   const EntryNode *EntryNodes;
532 
533   /// Constructor will parse the serialized function metadata written by BOLT
534   FunctionDescription(const uint8_t *FuncDesc);
535 
536   uint64_t getSize() const {
537     return 16 + NumLeafNodes * sizeof(InstrumentedNode) +
538            NumEdges * sizeof(EdgeDescription) +
539            NumCalls * sizeof(CallDescription) +
540            NumEntryNodes * sizeof(EntryNode);
541   }
542 };
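// Serialized layout parsed by the constructor below, as emitted by
// Instrumentation::emitTablesAsELFNote() (each count is a 4-byte integer):
//
//   [u32 NumLeafNodes][InstrumentedNode x NumLeafNodes]
//   [u32 NumEdges]    [EdgeDescription  x NumEdges]
//   [u32 NumCalls]    [CallDescription  x NumCalls]
//   [u32 NumEntryNodes][EntryNode       x NumEntryNodes]
//
// hence the 16 fixed bytes plus four arrays accounted for in getSize().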
543 
544 /// The context is created when the fdata profile needs to be written to disk
545 /// and we need to interpret our runtime counters. It contains pointers to the
546 /// mmaped binary (only the BOLT written metadata section). Deserialization
547 /// should be straightforward as most data is POD or an array of POD elements.
548 /// This metadata is used to reconstruct function CFGs.
549 struct ProfileWriterContext {
550   IndCallDescription *IndCallDescriptions;
551   IndCallTargetDescription *IndCallTargets;
552   uint8_t *FuncDescriptions;
553   char *Strings;  // String table with function names used in this binary
554   int FileDesc;   // File descriptor for the file on disk backing this
555                   // information in memory via mmap
556   void *MMapPtr;  // The mmap ptr
557   int MMapSize;   // The mmap size
558 
559   /// Hash table storing all possible call destinations to detect untracked
560   /// calls and correctly report them as [unknown] in output fdata.
561   CallFlowHashTable *CallFlowTable;
562 
563   /// Look up the sorted indirect call target vector to fetch the function name
564   /// and offset for an arbitrary function pointer.
565   const IndCallTargetDescription *lookupIndCallTarget(uint64_t Target) const;
566 };
567 
568 /// Performs a string comparison and returns zero if Str1 matches Str2,
569 /// comparing at most Size characters.
570 int compareStr(const char *Str1, const char *Str2, int Size) {
571   while (*Str1 == *Str2) {
572     if (*Str1 == '\0' || --Size == 0)
573       return 0;
574     ++Str1;
575     ++Str2;
576   }
577   return 1;
578 }
579 
580 /// Output Location to the fdata file
581 char *serializeLoc(const ProfileWriterContext &Ctx, char *OutBuf,
582                    const Location Loc, uint32_t BufSize) {
583   // fdata location format: Type Name Offset
584   // Type 1 - regular symbol
585   OutBuf = strCopy(OutBuf, "1 ");
586   const char *Str = Ctx.Strings + Loc.FunctionName;
587   uint32_t Size = 25; // Headroom reserved for the non-name parts of the line
588   while (*Str) {
589     *OutBuf++ = *Str++;
590     if (++Size >= BufSize)
591       break;
592   }
593   assert(!*Str, "buffer overflow, function name too large");
594   *OutBuf++ = ' ';
595   OutBuf = intToStr(OutBuf, Loc.Offset, 16);
596   *OutBuf++ = ' ';
597   return OutBuf;
598 }
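// For reference, a full profile line assembled with serializeLoc() follows the
// usual fdata shape (names and numbers made up):
//
//   1 main 1a 1 foo 0 0 150
//
// i.e. "<type> <from name> <from offset> <type> <to name> <to offset>
// <misprediction count, always 0 here> <execution count>".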
599 
600 /// Read and deserialize a function description written by BOLT. \p FuncDesc
601 /// points at the beginning of the function metadata structure in the file.
602 /// See Instrumentation::emitTablesAsELFNote()
603 FunctionDescription::FunctionDescription(const uint8_t *FuncDesc) {
604   NumLeafNodes = *reinterpret_cast<const uint32_t *>(FuncDesc);
605   DEBUG(reportNumber("NumLeafNodes = ", NumLeafNodes, 10));
606   LeafNodes = reinterpret_cast<const InstrumentedNode *>(FuncDesc + 4);
607 
608   NumEdges = *reinterpret_cast<const uint32_t *>(
609       FuncDesc + 4 + NumLeafNodes * sizeof(InstrumentedNode));
610   DEBUG(reportNumber("NumEdges = ", NumEdges, 10));
611   Edges = reinterpret_cast<const EdgeDescription *>(
612       FuncDesc + 8 + NumLeafNodes * sizeof(InstrumentedNode));
613 
614   NumCalls = *reinterpret_cast<const uint32_t *>(
615       FuncDesc + 8 + NumLeafNodes * sizeof(InstrumentedNode) +
616       NumEdges * sizeof(EdgeDescription));
617   DEBUG(reportNumber("NumCalls = ", NumCalls, 10));
618   Calls = reinterpret_cast<const CallDescription *>(
619       FuncDesc + 12 + NumLeafNodes * sizeof(InstrumentedNode) +
620       NumEdges * sizeof(EdgeDescription));
621   NumEntryNodes = *reinterpret_cast<const uint32_t *>(
622       FuncDesc + 12 + NumLeafNodes * sizeof(InstrumentedNode) +
623       NumEdges * sizeof(EdgeDescription) + NumCalls * sizeof(CallDescription));
624   DEBUG(reportNumber("NumEntryNodes = ", NumEntryNodes, 10));
625   EntryNodes = reinterpret_cast<const EntryNode *>(
626       FuncDesc + 16 + NumLeafNodes * sizeof(InstrumentedNode) +
627       NumEdges * sizeof(EdgeDescription) + NumCalls * sizeof(CallDescription));
628 }
629 
630 /// Read and mmap descriptions written by BOLT from the executable's notes
631 /// section
632 #if defined(HAVE_ELF_H) and !defined(__APPLE__)
633 
634 void *__attribute__((noinline)) __get_pc() {
635   return __builtin_extract_return_addr(__builtin_return_address(0));
636 }
637 
638 /// Parse an address-range string into the hex pair <StartAddress, EndAddress>
639 bool parseAddressRange(const char *Str, uint64_t &StartAddress,
640                        uint64_t &EndAddress) {
641   if (!Str)
642     return false;
643   // Parsed string format: <hex1>-<hex2>
644   StartAddress = hexToLong(Str, '-');
645   while (*Str && *Str != '-')
646     ++Str;
647   if (!*Str)
648     return false;
649   ++Str; // swallow '-'
650   EndAddress = hexToLong(Str);
651   return true;
652 }
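// Example (illustrative): entries in /proc/self/map_files/ are symlinks named
// after the mapping's address range, e.g. "559bbcd8f000-559bbcd97000", which
// parseAddressRange() splits into StartAddress = 0x559bbcd8f000 and
// EndAddress = 0x559bbcd97000.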
653 
654 /// Get the full path to the real binary by taking the current virtual address
655 /// and searching /proc/self/map_files for the link whose address range
656 /// contains it
657 static char *getBinaryPath() {
658   const uint32_t BufSize = 1024;
659   const uint32_t NameMax = 4096;
660   const char DirPath[] = "/proc/self/map_files/";
661   static char TargetPath[NameMax] = {};
662   char Buf[BufSize];
663 
664   if (__bolt_instr_binpath[0] != '\0')
665     return __bolt_instr_binpath;
666 
667   if (TargetPath[0] != '\0')
668     return TargetPath;
669 
670   unsigned long CurAddr = (unsigned long)__get_pc();
671   uint64_t FDdir = __open(DirPath,
672                           /*flags=*/0 /*O_RDONLY*/,
673                           /*mode=*/0666);
674   assert(static_cast<int64_t>(FDdir) >= 0,
675          "failed to open /proc/self/map_files");
676 
677   while (long Nread = __getdents(FDdir, (struct dirent *)Buf, BufSize)) {
678     assert(static_cast<int64_t>(Nread) != -1, "failed to get folder entries");
679 
680     struct dirent *d;
681     for (long Bpos = 0; Bpos < Nread; Bpos += d->d_reclen) {
682       d = (struct dirent *)(Buf + Bpos);
683 
684       uint64_t StartAddress, EndAddress;
685       if (!parseAddressRange(d->d_name, StartAddress, EndAddress))
686         continue;
687       if (CurAddr < StartAddress || CurAddr > EndAddress)
688         continue;
689       char FindBuf[NameMax];
690       char *C = strCopy(FindBuf, DirPath, NameMax);
691       C = strCopy(C, d->d_name, NameMax - (C - FindBuf));
692       *C = '\0';
693       uint32_t Ret = __readlink(FindBuf, TargetPath, sizeof(TargetPath));
694       assert(Ret != -1 && Ret != BufSize, "readlink error");
695       TargetPath[Ret] = '\0';
696       return TargetPath;
697     }
698   }
699   return nullptr;
700 }
701 
702 ProfileWriterContext readDescriptions() {
703   ProfileWriterContext Result;
704   char *BinPath = getBinaryPath();
705   assert(BinPath && BinPath[0] != '\0', "failed to find binary path");
706 
707   uint64_t FD = __open(BinPath,
708                        /*flags=*/0 /*O_RDONLY*/,
709                        /*mode=*/0666);
710   assert(static_cast<int64_t>(FD) >= 0, "failed to open binary path");
711 
712   Result.FileDesc = FD;
713 
714   // mmap our binary to memory
715   uint64_t Size = __lseek(FD, 0, 2 /*SEEK_END*/);
716   uint8_t *BinContents = reinterpret_cast<uint8_t *>(
717       __mmap(0, Size, 0x1 /* PROT_READ*/, 0x2 /* MAP_PRIVATE*/, FD, 0));
718   Result.MMapPtr = BinContents;
719   Result.MMapSize = Size;
720   Elf64_Ehdr *Hdr = reinterpret_cast<Elf64_Ehdr *>(BinContents);
721   Elf64_Shdr *Shdr = reinterpret_cast<Elf64_Shdr *>(BinContents + Hdr->e_shoff);
722   Elf64_Shdr *StringTblHeader = reinterpret_cast<Elf64_Shdr *>(
723       BinContents + Hdr->e_shoff + Hdr->e_shstrndx * Hdr->e_shentsize);
724 
725   // Find .bolt.instr.tables with the data we need and set pointers to it
726   for (int I = 0; I < Hdr->e_shnum; ++I) {
727     char *SecName = reinterpret_cast<char *>(
728         BinContents + StringTblHeader->sh_offset + Shdr->sh_name);
729     if (compareStr(SecName, ".bolt.instr.tables", 64) != 0) {
730       Shdr = reinterpret_cast<Elf64_Shdr *>(BinContents + Hdr->e_shoff +
731                                             (I + 1) * Hdr->e_shentsize);
732       continue;
733     }
734     // Actual contents of the ELF note start after offset 20 decimal:
735     // Offset 0: Producer name size (4 bytes)
736     // Offset 4: Contents size (4 bytes)
737     // Offset 8: Note type (4 bytes)
738     // Offset 12: Producer name (BOLT\0) (5 bytes + align to 4-byte boundary)
739     // Offset 20: Contents
740     uint32_t IndCallDescSize =
741         *reinterpret_cast<uint32_t *>(BinContents + Shdr->sh_offset + 20);
742     uint32_t IndCallTargetDescSize = *reinterpret_cast<uint32_t *>(
743         BinContents + Shdr->sh_offset + 24 + IndCallDescSize);
744     uint32_t FuncDescSize =
745         *reinterpret_cast<uint32_t *>(BinContents + Shdr->sh_offset + 28 +
746                                       IndCallDescSize + IndCallTargetDescSize);
747     Result.IndCallDescriptions = reinterpret_cast<IndCallDescription *>(
748         BinContents + Shdr->sh_offset + 24);
749     Result.IndCallTargets = reinterpret_cast<IndCallTargetDescription *>(
750         BinContents + Shdr->sh_offset + 28 + IndCallDescSize);
751     Result.FuncDescriptions = BinContents + Shdr->sh_offset + 32 +
752                               IndCallDescSize + IndCallTargetDescSize;
753     Result.Strings = reinterpret_cast<char *>(
754         BinContents + Shdr->sh_offset + 32 + IndCallDescSize +
755         IndCallTargetDescSize + FuncDescSize);
756     return Result;
757   }
758   const char ErrMsg[] =
759       "BOLT instrumentation runtime error: could not find section "
760       ".bolt.instr.tables\n";
761   reportError(ErrMsg, sizeof(ErrMsg));
762   return Result;
763 }
764 
765 #else
766 
767 ProfileWriterContext readDescriptions() {
768   ProfileWriterContext Result;
769   uint8_t *Tables = _bolt_instr_tables_getter();
770   uint32_t IndCallDescSize = *reinterpret_cast<uint32_t *>(Tables);
771   uint32_t IndCallTargetDescSize =
772       *reinterpret_cast<uint32_t *>(Tables + 4 + IndCallDescSize);
773   uint32_t FuncDescSize = *reinterpret_cast<uint32_t *>(
774       Tables + 8 + IndCallDescSize + IndCallTargetDescSize);
775   Result.IndCallDescriptions =
776       reinterpret_cast<IndCallDescription *>(Tables + 4);
777   Result.IndCallTargets = reinterpret_cast<IndCallTargetDescription *>(
778       Tables + 8 + IndCallDescSize);
779   Result.FuncDescriptions =
780       Tables + 12 + IndCallDescSize + IndCallTargetDescSize;
781   Result.Strings = reinterpret_cast<char *>(
782       Tables + 12 + IndCallDescSize + IndCallTargetDescSize + FuncDescSize);
783   return Result;
784 }
785 
786 #endif
787 
788 #if !defined(__APPLE__)
789 /// Debug helper: print overall metadata numbers to check that they are sane
790 void printStats(const ProfileWriterContext &Ctx) {
791   char StatMsg[BufSize];
792   char *StatPtr = StatMsg;
793   StatPtr =
794       strCopy(StatPtr,
795               "\nBOLT INSTRUMENTATION RUNTIME STATISTICS\n\nIndCallDescSize: ");
796   StatPtr = intToStr(StatPtr,
797                      Ctx.FuncDescriptions -
798                          reinterpret_cast<uint8_t *>(Ctx.IndCallDescriptions),
799                      10);
800   StatPtr = strCopy(StatPtr, "\nFuncDescSize: ");
801   StatPtr = intToStr(
802       StatPtr,
803       reinterpret_cast<uint8_t *>(Ctx.Strings) - Ctx.FuncDescriptions, 10);
804   StatPtr = strCopy(StatPtr, "\n__bolt_instr_num_ind_calls: ");
805   StatPtr = intToStr(StatPtr, __bolt_instr_num_ind_calls, 10);
806   StatPtr = strCopy(StatPtr, "\n__bolt_instr_num_funcs: ");
807   StatPtr = intToStr(StatPtr, __bolt_instr_num_funcs, 10);
808   StatPtr = strCopy(StatPtr, "\n");
809   __write(2, StatMsg, StatPtr - StatMsg);
810 }
811 #endif
812 
813 
814 /// This is part of a simple CFG representation in memory, where we store
815 /// a dynamically sized array of input and output edges per node, and store
816 /// a dynamically sized array of nodes per graph. We also store the spanning
817 /// tree edges for that CFG in a separate array of nodes in
818 /// \p SpanningTreeNodes, while the regular nodes live in \p CFGNodes.
819 struct Edge {
820   uint32_t Node; // Index of this edge's destination in the nodes array
821   uint32_t ID;   // Edge index in an array comprising all edges of the graph
822 };
823 
824 /// A regular graph node or a spanning tree node
825 struct Node {
826   uint32_t NumInEdges{0};  // Input edge count used to size InEdge
827   uint32_t NumOutEdges{0}; // Output edge count used to size OutEdges
828   Edge *InEdges{nullptr};  // Created and managed by \p Graph
829   Edge *OutEdges{nullptr}; // ditto
830 };
831 
832 /// Main class for CFG representation in memory. Manages object creation and
833 /// destruction, populates an array of CFG nodes as well as corresponding
834 /// spanning tree nodes.
835 struct Graph {
836   uint32_t NumNodes;
837   Node *CFGNodes;
838   Node *SpanningTreeNodes;
839   uint64_t *EdgeFreqs;
840   uint64_t *CallFreqs;
841   BumpPtrAllocator &Alloc;
842   const FunctionDescription &D;
843 
844   /// Reads a list of edges from function description \p D and builds
845   /// the graph from it. Allocates several internal dynamic structures that are
846   /// later destroyed by ~Graph() and uses \p Alloc. D.LeafNodes contains the
847   /// descriptions of all spanning tree leaf nodes (their counters). They are
848   /// the seed used to compute the rest of the missing edge counts in a
849   /// bottom-up traversal of the spanning tree.
850   Graph(BumpPtrAllocator &Alloc, const FunctionDescription &D,
851         const uint64_t *Counters, ProfileWriterContext &Ctx);
852   ~Graph();
853   void dump() const;
854 
855 private:
856   void computeEdgeFrequencies(const uint64_t *Counters,
857                               ProfileWriterContext &Ctx);
858   void dumpEdgeFreqs() const;
859 };
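// Minimal picture of the two node arrays (illustrative): for a CFG with edges
//   A -> B (counter 5), A -> C (no counter), C -> B (counter 2)
// CFGNodes records all three edges, while SpanningTreeNodes only records
// A -> C, the edge whose Counter field is 0xffffffff and whose frequency must
// therefore be inferred from the instrumented ones.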
860 
861 Graph::Graph(BumpPtrAllocator &Alloc, const FunctionDescription &D,
862              const uint64_t *Counters, ProfileWriterContext &Ctx)
863     : Alloc(Alloc), D(D) {
864   DEBUG(reportNumber("G = 0x", (uint64_t)this, 16));
865   // First pass to determine number of nodes
866   int32_t MaxNodes = -1;
867   CallFreqs = nullptr;
868   EdgeFreqs = nullptr;
869   for (int I = 0; I < D.NumEdges; ++I) {
870     if (static_cast<int32_t>(D.Edges[I].FromNode) > MaxNodes)
871       MaxNodes = D.Edges[I].FromNode;
872     if (static_cast<int32_t>(D.Edges[I].ToNode) > MaxNodes)
873       MaxNodes = D.Edges[I].ToNode;
874   }
875 
876   for (int I = 0; I < D.NumLeafNodes; ++I)
877     if (static_cast<int32_t>(D.LeafNodes[I].Node) > MaxNodes)
878       MaxNodes = D.LeafNodes[I].Node;
879 
880   for (int I = 0; I < D.NumCalls; ++I)
881     if (static_cast<int32_t>(D.Calls[I].FromNode) > MaxNodes)
882       MaxNodes = D.Calls[I].FromNode;
883 
884   // No nodes? Nothing to do
885   if (MaxNodes < 0) {
886     DEBUG(report("No nodes!\n"));
887     CFGNodes = nullptr;
888     SpanningTreeNodes = nullptr;
889     NumNodes = 0;
890     return;
891   }
892   ++MaxNodes;
893   DEBUG(reportNumber("NumNodes = ", MaxNodes, 10));
894   NumNodes = static_cast<uint32_t>(MaxNodes);
895 
896   // Initial allocations
897   CFGNodes = new (Alloc) Node[MaxNodes];
898 
899   DEBUG(reportNumber("G->CFGNodes = 0x", (uint64_t)CFGNodes, 16));
900   SpanningTreeNodes = new (Alloc) Node[MaxNodes];
901   DEBUG(reportNumber("G->SpanningTreeNodes = 0x",
902                      (uint64_t)SpanningTreeNodes, 16));
903 
904   // Figure out how much to allocate to each vector (in/out edge sets)
905   for (int I = 0; I < D.NumEdges; ++I) {
906     CFGNodes[D.Edges[I].FromNode].NumOutEdges++;
907     CFGNodes[D.Edges[I].ToNode].NumInEdges++;
908     if (D.Edges[I].Counter != 0xffffffff)
909       continue;
910 
911     SpanningTreeNodes[D.Edges[I].FromNode].NumOutEdges++;
912     SpanningTreeNodes[D.Edges[I].ToNode].NumInEdges++;
913   }
914 
915   // Allocate in/out edge sets
916   for (int I = 0; I < MaxNodes; ++I) {
917     if (CFGNodes[I].NumInEdges > 0)
918       CFGNodes[I].InEdges = new (Alloc) Edge[CFGNodes[I].NumInEdges];
919     if (CFGNodes[I].NumOutEdges > 0)
920       CFGNodes[I].OutEdges = new (Alloc) Edge[CFGNodes[I].NumOutEdges];
921     if (SpanningTreeNodes[I].NumInEdges > 0)
922       SpanningTreeNodes[I].InEdges =
923           new (Alloc) Edge[SpanningTreeNodes[I].NumInEdges];
924     if (SpanningTreeNodes[I].NumOutEdges > 0)
925       SpanningTreeNodes[I].OutEdges =
926           new (Alloc) Edge[SpanningTreeNodes[I].NumOutEdges];
927     CFGNodes[I].NumInEdges = 0;
928     CFGNodes[I].NumOutEdges = 0;
929     SpanningTreeNodes[I].NumInEdges = 0;
930     SpanningTreeNodes[I].NumOutEdges = 0;
931   }
932 
933   // Fill in/out edge sets
934   for (int I = 0; I < D.NumEdges; ++I) {
935     const uint32_t Src = D.Edges[I].FromNode;
936     const uint32_t Dst = D.Edges[I].ToNode;
937     Edge *E = &CFGNodes[Src].OutEdges[CFGNodes[Src].NumOutEdges++];
938     E->Node = Dst;
939     E->ID = I;
940 
941     E = &CFGNodes[Dst].InEdges[CFGNodes[Dst].NumInEdges++];
942     E->Node = Src;
943     E->ID = I;
944 
945     if (D.Edges[I].Counter != 0xffffffff)
946       continue;
947 
948     E = &SpanningTreeNodes[Src]
949              .OutEdges[SpanningTreeNodes[Src].NumOutEdges++];
950     E->Node = Dst;
951     E->ID = I;
952 
953     E = &SpanningTreeNodes[Dst]
954              .InEdges[SpanningTreeNodes[Dst].NumInEdges++];
955     E->Node = Src;
956     E->ID = I;
957   }
958 
959   computeEdgeFrequencies(Counters, Ctx);
960 }
961 
962 Graph::~Graph() {
963   if (CallFreqs)
964     Alloc.deallocate(CallFreqs);
965   if (EdgeFreqs)
966     Alloc.deallocate(EdgeFreqs);
967   for (int I = NumNodes - 1; I >= 0; --I) {
968     if (SpanningTreeNodes[I].OutEdges)
969       Alloc.deallocate(SpanningTreeNodes[I].OutEdges);
970     if (SpanningTreeNodes[I].InEdges)
971       Alloc.deallocate(SpanningTreeNodes[I].InEdges);
972     if (CFGNodes[I].OutEdges)
973       Alloc.deallocate(CFGNodes[I].OutEdges);
974     if (CFGNodes[I].InEdges)
975       Alloc.deallocate(CFGNodes[I].InEdges);
976   }
977   if (SpanningTreeNodes)
978     Alloc.deallocate(SpanningTreeNodes);
979   if (CFGNodes)
980     Alloc.deallocate(CFGNodes);
981 }
982 
983 void Graph::dump() const {
984   reportNumber("Dumping graph with number of nodes: ", NumNodes, 10);
985   report("  Full graph:\n");
986   for (int I = 0; I < NumNodes; ++I) {
987     const Node *N = &CFGNodes[I];
988     reportNumber("    Node #", I, 10);
989     reportNumber("      InEdges total ", N->NumInEdges, 10);
990     for (int J = 0; J < N->NumInEdges; ++J)
991       reportNumber("        ", N->InEdges[J].Node, 10);
992     reportNumber("      OutEdges total ", N->NumOutEdges, 10);
993     for (int J = 0; J < N->NumOutEdges; ++J)
994       reportNumber("        ", N->OutEdges[J].Node, 10);
995     report("\n");
996   }
997   report("  Spanning tree:\n");
998   for (int I = 0; I < NumNodes; ++I) {
999     const Node *N = &SpanningTreeNodes[I];
1000     reportNumber("    Node #", I, 10);
1001     reportNumber("      InEdges total ", N->NumInEdges, 10);
1002     for (int J = 0; J < N->NumInEdges; ++J)
1003       reportNumber("        ", N->InEdges[J].Node, 10);
1004     reportNumber("      OutEdges total ", N->NumOutEdges, 10);
1005     for (int J = 0; J < N->NumOutEdges; ++J)
1006       reportNumber("        ", N->OutEdges[J].Node, 10);
1007     report("\n");
1008   }
1009 }
1010 
1011 void Graph::dumpEdgeFreqs() const {
1012   reportNumber(
1013       "Dumping edge frequencies for graph with num edges: ", D.NumEdges, 10);
1014   for (int I = 0; I < D.NumEdges; ++I) {
1015     reportNumber("* Src: ", D.Edges[I].FromNode, 10);
1016     reportNumber("  Dst: ", D.Edges[I].ToNode, 10);
1017     reportNumber("    Cnt: ", EdgeFreqs[I], 10);
1018   }
1019 }
1020 
1021 /// Auxiliary map structure for fast lookups of which calls map to each node of
1022 /// the function CFG
1023 struct NodeToCallsMap {
1024   struct MapEntry {
1025     uint32_t NumCalls;
1026     uint32_t *Calls;
1027   };
1028   MapEntry *Entries;
1029   BumpPtrAllocator &Alloc;
1030   const uint32_t NumNodes;
1031 
1032   NodeToCallsMap(BumpPtrAllocator &Alloc, const FunctionDescription &D,
1033                  uint32_t NumNodes)
1034       : Alloc(Alloc), NumNodes(NumNodes) {
1035     Entries = new (Alloc, 0) MapEntry[NumNodes];
1036     for (int I = 0; I < D.NumCalls; ++I) {
1037       DEBUG(reportNumber("Registering call in node ", D.Calls[I].FromNode, 10));
1038       ++Entries[D.Calls[I].FromNode].NumCalls;
1039     }
1040     for (int I = 0; I < NumNodes; ++I) {
1041       Entries[I].Calls = Entries[I].NumCalls ? new (Alloc)
1042                                                    uint32_t[Entries[I].NumCalls]
1043                                              : nullptr;
1044       Entries[I].NumCalls = 0;
1045     }
1046     for (int I = 0; I < D.NumCalls; ++I) {
1047       MapEntry &Entry = Entries[D.Calls[I].FromNode];
1048       Entry.Calls[Entry.NumCalls++] = I;
1049     }
1050   }
1051 
1052   /// Set the frequency of all calls in node \p NodeID to Freq. However, if
1053   /// the calls have their own counters and do not depend on the basic block
1054   /// counter, this means they have landing pads and throw exceptions. In this
1055   /// case, set their frequency with their counters and return the maximum
1056   /// value observed in such counters. This will be used as the new frequency
1057   /// at basic block entry. This is used to fix the CFG edge frequencies in the
1058   /// presence of exceptions.
1059   uint64_t visitAllCallsIn(uint32_t NodeID, uint64_t Freq, uint64_t *CallFreqs,
1060                            const FunctionDescription &D,
1061                            const uint64_t *Counters,
1062                            ProfileWriterContext &Ctx) const {
1063     const MapEntry &Entry = Entries[NodeID];
1064     uint64_t MaxValue = 0ull;
1065     for (int I = 0, E = Entry.NumCalls; I != E; ++I) {
1066       const uint32_t CallID = Entry.Calls[I];
1067       DEBUG(reportNumber("  Setting freq for call ID: ", CallID, 10));
1068       const CallDescription &CallDesc = D.Calls[CallID];
1069       if (CallDesc.Counter == 0xffffffff) {
1070         CallFreqs[CallID] = Freq;
1071         DEBUG(reportNumber("  with : ", Freq, 10));
1072       } else {
1073         const uint64_t CounterVal = Counters[CallDesc.Counter];
1074         CallFreqs[CallID] = CounterVal;
1075         MaxValue = CounterVal > MaxValue ? CounterVal : MaxValue;
1076         DEBUG(reportNumber("  with (private counter) : ", CounterVal, 10));
1077       }
1078       DEBUG(reportNumber("  Address: 0x", CallDesc.TargetAddress, 16));
1079       if (CallFreqs[CallID] > 0)
1080         Ctx.CallFlowTable->get(CallDesc.TargetAddress).Calls +=
1081             CallFreqs[CallID];
1082     }
1083     return MaxValue;
1084   }
1085 
1086   ~NodeToCallsMap() {
1087     for (int I = NumNodes - 1; I >= 0; --I)
1088       if (Entries[I].Calls)
1089         Alloc.deallocate(Entries[I].Calls);
1090     Alloc.deallocate(Entries);
1091   }
1092 };
1093 
1094 /// Fill an array with the frequency of each edge in the function represented
1095 /// by G, as well as another array for each call.
1096 void Graph::computeEdgeFrequencies(const uint64_t *Counters,
1097                                    ProfileWriterContext &Ctx) {
1098   if (NumNodes == 0)
1099     return;
1100 
1101   EdgeFreqs = D.NumEdges ? new (Alloc, 0) uint64_t [D.NumEdges] : nullptr;
1102   CallFreqs = D.NumCalls ? new (Alloc, 0) uint64_t [D.NumCalls] : nullptr;
1103 
1104   // Setup a lookup for calls present in each node (BB)
1105   NodeToCallsMap *CallMap = new (Alloc) NodeToCallsMap(Alloc, D, NumNodes);
1106 
1107   // Perform a bottom-up, BFS traversal of the spanning tree in G. Edges in the
1108   // spanning tree don't have explicit counters. We must infer their value using
1109   // a linear combination of other counters (sum of counters of the outgoing
1110   // edges minus sum of counters of the incoming edges).
1111   uint32_t *Stack = new (Alloc) uint32_t [NumNodes];
1112   uint32_t StackTop = 0;
1113   enum Status : uint8_t { S_NEW = 0, S_VISITING, S_VISITED };
1114   Status *Visited = new (Alloc, 0) Status[NumNodes];
1115   uint64_t *LeafFrequency = new (Alloc, 0) uint64_t[NumNodes];
1116   uint64_t *EntryAddress = new (Alloc, 0) uint64_t[NumNodes];
1117 
1118   // Setup a fast lookup for frequency of leaf nodes, which have special
1119   // basic block frequency instrumentation (they are not edge profiled).
1120   for (int I = 0; I < D.NumLeafNodes; ++I) {
1121     LeafFrequency[D.LeafNodes[I].Node] = Counters[D.LeafNodes[I].Counter];
1122     DEBUG({
1123       if (Counters[D.LeafNodes[I].Counter] > 0) {
1124         reportNumber("Leaf Node# ", D.LeafNodes[I].Node, 10);
1125         reportNumber("     Counter: ", Counters[D.LeafNodes[I].Counter], 10);
1126       }
1127     });
1128   }
1129   for (int I = 0; I < D.NumEntryNodes; ++I) {
1130     EntryAddress[D.EntryNodes[I].Node] = D.EntryNodes[I].Address;
1131     DEBUG({
1132         reportNumber("Entry Node# ", D.EntryNodes[I].Node, 10);
1133         reportNumber("      Address: ", D.EntryNodes[I].Address, 16);
1134     });
1135   }
1136   // Add all root nodes to the stack
1137   for (int I = 0; I < NumNodes; ++I)
1138     if (SpanningTreeNodes[I].NumInEdges == 0)
1139       Stack[StackTop++] = I;
1140 
1141   // Empty stack?
1142   if (StackTop == 0) {
1143     DEBUG(report("Empty stack!\n"));
1144     Alloc.deallocate(EntryAddress);
1145     Alloc.deallocate(LeafFrequency);
1146     Alloc.deallocate(Visited);
1147     Alloc.deallocate(Stack);
1148     CallMap->~NodeToCallsMap();
1149     Alloc.deallocate(CallMap);
1150     if (CallFreqs)
1151       Alloc.deallocate(CallFreqs);
1152     if (EdgeFreqs)
1153       Alloc.deallocate(EdgeFreqs);
1154     EdgeFreqs = nullptr;
1155     CallFreqs = nullptr;
1156     return;
1157   }
1158   // Add all known edge counts, will infer the rest
1159   for (int I = 0; I < D.NumEdges; ++I) {
1160     const uint32_t C = D.Edges[I].Counter;
1161     if (C == 0xffffffff) // inferred counter - we will compute its value
1162       continue;
1163     EdgeFreqs[I] = Counters[C];
1164   }
1165 
1166   while (StackTop > 0) {
1167     const uint32_t Cur = Stack[--StackTop];
1168     DEBUG({
1169       if (Visited[Cur] == S_VISITING)
1170         report("(visiting) ");
1171       else
1172         report("(new) ");
1173       reportNumber("Cur: ", Cur, 10);
1174     });
1175 
1176     // This shouldn't happen in a tree
1177     assert(Visited[Cur] != S_VISITED, "should not have visited nodes in stack");
1178     if (Visited[Cur] == S_NEW) {
1179       Visited[Cur] = S_VISITING;
1180       Stack[StackTop++] = Cur;
1181       assert(StackTop <= NumNodes, "stack grew too large");
1182       for (int I = 0, E = SpanningTreeNodes[Cur].NumOutEdges; I < E; ++I) {
1183         const uint32_t Succ = SpanningTreeNodes[Cur].OutEdges[I].Node;
1184         Stack[StackTop++] = Succ;
1185         assert(StackTop <= NumNodes, "stack grew too large");
1186       }
1187       continue;
1188     }
1189     Visited[Cur] = S_VISITED;
1190 
1191     // Establish our node frequency based on outgoing edges, which should all be
1192     // resolved by now.
1193     int64_t CurNodeFreq = LeafFrequency[Cur];
1194     // Not a leaf?
1195     if (!CurNodeFreq) {
1196       for (int I = 0, E = CFGNodes[Cur].NumOutEdges; I != E; ++I) {
1197         const uint32_t SuccEdge = CFGNodes[Cur].OutEdges[I].ID;
1198         CurNodeFreq += EdgeFreqs[SuccEdge];
1199       }
1200     }
1201     if (CurNodeFreq < 0)
1202       CurNodeFreq = 0;
1203 
1204     const uint64_t CallFreq = CallMap->visitAllCallsIn(
1205         Cur, CurNodeFreq > 0 ? CurNodeFreq : 0, CallFreqs, D, Counters, Ctx);
1206 
1207     // Exception handling affected our output flow? Fix with calls info
1208     DEBUG({
1209       if (CallFreq > CurNodeFreq)
1210         report("Bumping node frequency with call info\n");
1211     });
1212     CurNodeFreq = CallFreq > CurNodeFreq ? CallFreq : CurNodeFreq;
1213 
1214     if (CurNodeFreq > 0) {
1215       if (uint64_t Addr = EntryAddress[Cur]) {
1216         DEBUG(
1217             reportNumber("  Setting flow at entry point address 0x", Addr, 16));
1218         DEBUG(reportNumber("  with: ", CurNodeFreq, 10));
1219         Ctx.CallFlowTable->get(Addr).Val = CurNodeFreq;
1220       }
1221     }
1222 
1223     // No parent? Reached a tree root, limit to call frequency updating.
1224     if (SpanningTreeNodes[Cur].NumInEdges == 0)
1225       continue;
1226 
1227     assert(SpanningTreeNodes[Cur].NumInEdges == 1, "must have 1 parent");
1228     const uint32_t Parent = SpanningTreeNodes[Cur].InEdges[0].Node;
1229     const uint32_t ParentEdge = SpanningTreeNodes[Cur].InEdges[0].ID;
1230 
1231     // Calculate parent edge freq.
1232     int64_t ParentEdgeFreq = CurNodeFreq;
1233     for (int I = 0, E = CFGNodes[Cur].NumInEdges; I != E; ++I) {
1234       const uint32_t PredEdge = CFGNodes[Cur].InEdges[I].ID;
1235       ParentEdgeFreq -= EdgeFreqs[PredEdge];
1236     }
1237 
1238     // Sometimes the conservative CFG that BOLT builds will lead to incorrect
1239     // flow computation. For example, in a BB that transitively calls the exit
1240     // syscall, BOLT will add a fall-through successor even though it should not
1241 // have any successors. So this block's execution count will likely be wrong.
1242 // We tolerate this imperfection since this case should be quite infrequent.
1243     if (ParentEdgeFreq < 0) {
1244       DEBUG(dumpEdgeFreqs());
1245       DEBUG(report("WARNING: incorrect flow"));
1246       ParentEdgeFreq = 0;
1247     }
1248     DEBUG(reportNumber("  Setting freq for ParentEdge: ", ParentEdge, 10));
1249     DEBUG(reportNumber("  with ParentEdgeFreq: ", ParentEdgeFreq, 10));
1250     EdgeFreqs[ParentEdge] = ParentEdgeFreq;
1251   }
1252 
1253   Alloc.deallocate(EntryAddress);
1254   Alloc.deallocate(LeafFrequency);
1255   Alloc.deallocate(Visited);
1256   Alloc.deallocate(Stack);
1257   CallMap->~NodeToCallsMap();
1258   Alloc.deallocate(CallMap);
1259   DEBUG(dumpEdgeFreqs());
1260 }
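// Worked example of the inference above (illustrative): suppose node B has two
// incoming CFG edges, an instrumented one with counter 30 and an uninstrumented
// spanning tree edge from its parent A, plus one instrumented outgoing edge
// with counter 100. B is not a leaf, so its frequency is the sum of its
// outgoing edge counts (100), and the parent edge gets 100 - 30 = 70. Negative
// intermediate results, which the conservative CFG can produce, are clamped to
// zero.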
1261 
1262 /// Write to \p FD all of the edge profiles for function \p FuncDesc. Uses
1263 /// \p Alloc to allocate helper dynamic structures used to compute the profile
1264 /// for edges that we do not explicitly instrument.
1265 const uint8_t *writeFunctionProfile(int FD, ProfileWriterContext &Ctx,
1266                                     const uint8_t *FuncDesc,
1267                                     BumpPtrAllocator &Alloc) {
1268   const FunctionDescription F(FuncDesc);
1269   const uint8_t *next = FuncDesc + F.getSize();
1270 
1271 #if !defined(__APPLE__)
1272   uint64_t *bolt_instr_locations = __bolt_instr_locations;
1273 #else
1274   uint64_t *bolt_instr_locations = _bolt_instr_locations_getter();
1275 #endif
1276 
1277   // Skip funcs we know are cold
1278 #ifndef ENABLE_DEBUG
1279   uint64_t CountersFreq = 0;
1280   for (int I = 0; I < F.NumLeafNodes; ++I)
1281     CountersFreq += bolt_instr_locations[F.LeafNodes[I].Counter];
1282 
1283   if (CountersFreq == 0) {
1284     for (int I = 0; I < F.NumEdges; ++I) {
1285       const uint32_t C = F.Edges[I].Counter;
1286       if (C == 0xffffffff)
1287         continue;
1288       CountersFreq += bolt_instr_locations[C];
1289     }
1290     if (CountersFreq == 0) {
1291       for (int I = 0; I < F.NumCalls; ++I) {
1292         const uint32_t C = F.Calls[I].Counter;
1293         if (C == 0xffffffff)
1294           continue;
1295         CountersFreq += bolt_instr_locations[C];
1296       }
1297       if (CountersFreq == 0)
1298         return next;
1299     }
1300   }
1301 #endif
1302 
1303   Graph *G = new (Alloc) Graph(Alloc, F, bolt_instr_locations, Ctx);
1304   DEBUG(G->dump());
1305 
1306   if (!G->EdgeFreqs && !G->CallFreqs) {
1307     G->~Graph();
1308     Alloc.deallocate(G);
1309     return next;
1310   }
1311 
1312   for (int I = 0; I < F.NumEdges; ++I) {
1313     const uint64_t Freq = G->EdgeFreqs[I];
1314     if (Freq == 0)
1315       continue;
1316     const EdgeDescription *Desc = &F.Edges[I];
1317     char LineBuf[BufSize];
1318     char *Ptr = LineBuf;
1319     Ptr = serializeLoc(Ctx, Ptr, Desc->From, BufSize);
1320     Ptr = serializeLoc(Ctx, Ptr, Desc->To, BufSize - (Ptr - LineBuf));
1321     Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 22);
1322     Ptr = intToStr(Ptr, Freq, 10);
1323     *Ptr++ = '\n';
1324     __write(FD, LineBuf, Ptr - LineBuf);
1325   }
1326 
1327   for (int I = 0; I < F.NumCalls; ++I) {
1328     const uint64_t Freq = G->CallFreqs[I];
1329     if (Freq == 0)
1330       continue;
1331     char LineBuf[BufSize];
1332     char *Ptr = LineBuf;
1333     const CallDescription *Desc = &F.Calls[I];
1334     Ptr = serializeLoc(Ctx, Ptr, Desc->From, BufSize);
1335     Ptr = serializeLoc(Ctx, Ptr, Desc->To, BufSize - (Ptr - LineBuf));
1336     Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
1337     Ptr = intToStr(Ptr, Freq, 10);
1338     *Ptr++ = '\n';
1339     __write(FD, LineBuf, Ptr - LineBuf);
1340   }
1341 
1342   G->~Graph();
1343   Alloc.deallocate(G);
1344   return next;
1345 }
1346 
1347 #if !defined(__APPLE__)
1348 const IndCallTargetDescription *
1349 ProfileWriterContext::lookupIndCallTarget(uint64_t Target) const {
1350   uint32_t B = 0;
1351   uint32_t E = __bolt_instr_num_ind_targets;
1352   if (E == 0)
1353     return nullptr;
1354   do {
1355     uint32_t I = (E - B) / 2 + B;
1356     if (IndCallTargets[I].Address == Target)
1357       return &IndCallTargets[I];
1358     if (IndCallTargets[I].Address < Target)
1359       B = I + 1;
1360     else
1361       E = I;
1362   } while (B < E);
1363   return nullptr;
1364 }
1365 
1366 /// Write a single indirect call <src, target> pair to the fdata file
1367 void visitIndCallCounter(IndirectCallHashTable::MapEntry &Entry,
1368                          int FD, int CallsiteID,
1369                          ProfileWriterContext *Ctx) {
1370   if (Entry.Val == 0)
1371     return;
1372   DEBUG(reportNumber("Target func 0x", Entry.Key, 16));
1373   DEBUG(reportNumber("Target freq: ", Entry.Val, 10));
1374   const IndCallDescription *CallsiteDesc =
1375       &Ctx->IndCallDescriptions[CallsiteID];
1376   const IndCallTargetDescription *TargetDesc =
1377       Ctx->lookupIndCallTarget(Entry.Key);
1378   if (!TargetDesc) {
1379     DEBUG(report("Failed to lookup indirect call target\n"));
1380     char LineBuf[BufSize];
1381     char *Ptr = LineBuf;
1382     Ptr = serializeLoc(*Ctx, Ptr, *CallsiteDesc, BufSize);
1383     Ptr = strCopy(Ptr, "0 [unknown] 0 0 ", BufSize - (Ptr - LineBuf) - 40);
1384     Ptr = intToStr(Ptr, Entry.Val, 10);
1385     *Ptr++ = '\n';
1386     __write(FD, LineBuf, Ptr - LineBuf);
1387     return;
1388   }
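       // Credit these calls to the target in the call-flow table so that
       // visitCallFlowEntry can later compare them against the callee's own
       // entry frequency and account for any missing callers.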
1389   Ctx->CallFlowTable->get(TargetDesc->Address).Calls += Entry.Val;
1390   char LineBuf[BufSize];
1391   char *Ptr = LineBuf;
1392   Ptr = serializeLoc(*Ctx, Ptr, *CallsiteDesc, BufSize);
1393   Ptr = serializeLoc(*Ctx, Ptr, TargetDesc->Loc, BufSize - (Ptr - LineBuf));
1394   Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
1395   Ptr = intToStr(Ptr, Entry.Val, 10);
1396   *Ptr++ = '\n';
1397   __write(FD, LineBuf, Ptr - LineBuf);
1398 }
1399 
1400 /// Write to \p FD all of the indirect call profiles.
1401 void writeIndirectCallProfile(int FD, ProfileWriterContext &Ctx) {
1402   for (int I = 0; I < __bolt_instr_num_ind_calls; ++I) {
1403     DEBUG(reportNumber("IndCallsite #", I, 10));
1404     GlobalIndCallCounters[I].forEachElement(visitIndCallCounter, FD, I, &Ctx);
1405   }
1406 }
1407 
1408 /// Check a single call flow for a callee versus all known callers. If there
1409 /// are fewer callers than the callee expects, write the difference with source
1410 /// [unknown] in the profile.
1411 void visitCallFlowEntry(CallFlowHashTable::MapEntry &Entry, int FD,
1412                         ProfileWriterContext *Ctx) {
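       // Entry.Calls accumulates calls attributed to known callers (see
       // visitIndCallCounter above); Entry.Val is assumed to hold the callee's
       // own recorded entry frequency, filled in elsewhere while function
       // profiles are written.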
1413   DEBUG(reportNumber("Call flow entry address: 0x", Entry.Key, 16));
1414   DEBUG(reportNumber("Calls: ", Entry.Calls, 10));
1415   DEBUG(reportNumber("Reported entry frequency: ", Entry.Val, 10));
1416   DEBUG({
1417     if (Entry.Calls > Entry.Val)
1418       report("  More calls than expected!\n");
1419   });
1420   if (Entry.Val <= Entry.Calls)
1421     return;
1422   DEBUG(reportNumber(
1423       "  Balancing calls with traffic: ", Entry.Val - Entry.Calls, 10));
1424   const IndCallTargetDescription *TargetDesc =
1425       Ctx->lookupIndCallTarget(Entry.Key);
1426   if (!TargetDesc) {
1427     // There is probably something wrong with this callee and this should be
1428     // investigated, but I don't want to assert and lose all data collected.
1429     DEBUG(report("WARNING: failed to look up call target!\n"));
1430     return;
1431   }
1432   char LineBuf[BufSize];
1433   char *Ptr = LineBuf;
1434   Ptr = strCopy(Ptr, "0 [unknown] 0 ", BufSize);
1435   Ptr = serializeLoc(*Ctx, Ptr, TargetDesc->Loc, BufSize - (Ptr - LineBuf));
1436   Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
1437   Ptr = intToStr(Ptr, Entry.Val - Entry.Calls, 10);
1438   *Ptr++ = '\n';
1439   __write(FD, LineBuf, Ptr - LineBuf);
1440 }
1441 
1442 /// Open the fdata file for writing and return a valid file descriptor,
1443 /// aborting the program upon failure.
1444 int openProfile() {
1445   // Build the profile name string, appending ".<PID>.fdata" if requested
1446   char Buf[BufSize];
1447   char *Ptr = Buf;
1448   uint64_t PID = __getpid();
1449   Ptr = strCopy(Buf, __bolt_instr_filename, BufSize);
1450   if (__bolt_instr_use_pid) {
1451     Ptr = strCopy(Ptr, ".", BufSize - (Ptr - Buf + 1));
1452     Ptr = intToStr(Ptr, PID, 10);
1453     Ptr = strCopy(Ptr, ".fdata", BufSize - (Ptr - Buf + 1));
1454   }
1455   *Ptr++ = '\0';
1456   uint64_t FD = __open(Buf,
1457                        /*flags=*/0x241 /*O_WRONLY|O_TRUNC|O_CREAT*/,
1458                        /*mode=*/0666);
1459   if (static_cast<int64_t>(FD) < 0) {
1460     report("Error while trying to open profile file for writing: ");
1461     report(Buf);
1462     reportNumber("\nFailed with error number: 0x",
1463                  0 - static_cast<int64_t>(FD), 16);
1464     __exit(1);
1465   }
1466   return FD;
1467 }
1468 
1469 #endif
1470 
1471 } // anonymous namespace
1472 
1473 #if !defined(__APPLE__)
1474 
1475 /// Reset all counters in case you want to start profiling a new phase of your
1476 /// program independently of prior phases.
1477 /// The address of this function is printed by BOLT and this can be called by
1478 /// any attached debugger during runtime. There is a useful one-liner for gdb:
1479 ///
1480 ///   gdb -p $(pgrep -xo PROCESSNAME) -ex 'p ((void(*)())0xdeadbeef)()' \
1481 ///     -ex 'set confirm off' -ex quit
1482 ///
1483 /// where 0xdeadbeef is this function's address and PROCESSNAME is the name of
1484 /// your binary.
1485 extern "C" void __bolt_instr_clear_counters() {
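       // Counters are 64-bit slots, hence the byte count of __bolt_num_counters * 8.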
1486   memset(reinterpret_cast<char *>(__bolt_instr_locations), 0,
1487          __bolt_num_counters * 8);
1488   for (int I = 0; I < __bolt_instr_num_ind_calls; ++I)
1489     GlobalIndCallCounters[I].resetCounters();
1490 }
1491 
1492 /// This is the entry point for profile writing.
1493 /// There are three ways of getting here:
1494 ///
1495 ///  * Program execution ended, finalization methods are running and BOLT
1496 ///    hooked into FINI from your binary dynamic section;
1497 ///  * You used the sleep timer option and during initialization we forked
1498 ///    a separate process that will call this function periodically;
1499 ///  * BOLT prints this function address so you can attach a debugger and
1500 ///    call this function directly to get your profile written to disk
1501 ///    on demand.
1502 ///
1503 extern "C" void __attribute((force_align_arg_pointer))
1504 __bolt_instr_data_dump() {
1505   // Another dump is already in progress; bail out
1506   if (!GlobalWriteProfileMutex->acquire())
1507     return;
1508 
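       // Use a dedicated allocator, capped at 100 MiB, for the call-flow hash
       // table so it survives independently of the per-function allocator below.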
1509   BumpPtrAllocator HashAlloc;
1510   HashAlloc.setMaxSize(0x6400000);
1511   ProfileWriterContext Ctx = readDescriptions();
1512   Ctx.CallFlowTable = new (HashAlloc, 0) CallFlowHashTable(HashAlloc);
1513 
1514   DEBUG(printStats(Ctx));
1515 
1516   int FD = openProfile();
1517 
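       // A single allocator, also capped at 100 MiB, backs each function's Graph
       // in turn; it is cleared after every function so peak memory stays bounded.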
1518   BumpPtrAllocator Alloc;
1519   Alloc.setMaxSize(0x6400000);
1520   const uint8_t *FuncDesc = Ctx.FuncDescriptions;
1521   for (int I = 0, E = __bolt_instr_num_funcs; I < E; ++I) {
1522     FuncDesc = writeFunctionProfile(FD, Ctx, FuncDesc, Alloc);
1523     Alloc.clear();
1524     DEBUG(reportNumber("FuncDesc now: ", (uint64_t)FuncDesc, 16));
1525   }
1526   assert(FuncDesc == (void *)Ctx.Strings,
1527          "FuncDesc ptr must be equal to stringtable");
1528 
1529   writeIndirectCallProfile(FD, Ctx);
1530   Ctx.CallFlowTable->forEachElement(visitCallFlowEntry, FD, &Ctx);
1531 
1532   __fsync(FD);
1533   __close(FD);
1534   __munmap(Ctx.MMapPtr, Ctx.MMapSize);
1535   __close(Ctx.FileDesc);
1536   HashAlloc.destroy();
1537   GlobalWriteProfileMutex->release();
1538   DEBUG(report("Finished writing profile.\n"));
1539 }
1540 
1541 /// Event loop for our child process spawned during setup to dump profile data
1542 /// at user-specified intervals
1543 void watchProcess() {
1544   timespec ts, rem;
1545   uint64_t Elapsed = 0ull;
1546   uint64_t ppid;
1547   if (__bolt_instr_wait_forks) {
1548     // Store the parent pgid, negated so that kill() targets the whole group
1549     ppid = -__getpgid(0);
1550     // And leave parent process group
1551     __setpgid(0, 0);
1552   } else {
1553     // Store parent pid
1554     ppid = __getppid();
1555     if (ppid == 1) {
1556       // Parent already dead
1557       __bolt_instr_data_dump();
1558       goto out;
1559     }
1560   }
1561 
1562   ts.tv_sec = 1;
1563   ts.tv_nsec = 0;
1564   while (1) {
1565     __nanosleep(&ts, &rem);
1566     // This means our parent process or all its forks are dead,
1567     // so no need for us to keep dumping.
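         // (__kill with signal 0 delivers nothing: it only probes whether the
         // pid, or the whole process group when the pid is negative, still exists.)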
1568     if (__kill(ppid, 0) < 0) {
1569       if (__bolt_instr_no_counters_clear)
1570         __bolt_instr_data_dump();
1571       break;
1572     }
1573 
1574     if (++Elapsed < __bolt_instr_sleep_time)
1575       continue;
1576 
1577     Elapsed = 0;
1578     __bolt_instr_data_dump();
1579     if (!__bolt_instr_no_counters_clear)
1580       __bolt_instr_clear_counters();
1581   }
1582 
1583 out:;
1584   DEBUG(report("My parent process is dead, bye!\n"));
1585   __exit(0);
1586 }
1587 
1588 extern "C" void __bolt_instr_indirect_call();
1589 extern "C" void __bolt_instr_indirect_tailcall();
1590 
1591 /// Initialization code
1592 extern "C" void __attribute((force_align_arg_pointer)) __bolt_instr_setup() {
1593   const uint64_t CountersStart =
1594       reinterpret_cast<uint64_t>(&__bolt_instr_locations[0]);
1595   const uint64_t CountersEnd = alignTo(
1596       reinterpret_cast<uint64_t>(&__bolt_instr_locations[__bolt_num_counters]),
1597       0x1000);
1598   DEBUG(reportNumber("replace mmap start: ", CountersStart, 16));
1599   DEBUG(reportNumber("replace mmap stop: ", CountersEnd, 16));
1600   assert(CountersEnd > CountersStart, "no counters");
1601   // Re-map our counters as shared instead of private, so that forked processes
1602   // keep counting into the same memory
1603   __mmap(CountersStart, CountersEnd - CountersStart,
1604          0x3 /*PROT_READ|PROT_WRITE*/,
1605          0x31 /*MAP_ANONYMOUS | MAP_SHARED | MAP_FIXED*/, -1, 0);
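       // MAP_ANONYMOUS yields zero-filled pages, so this also resets the
       // counters; that is harmless here assuming setup runs before any counter
       // is incremented.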
1606 
1607   __bolt_ind_call_counter_func_pointer = __bolt_instr_indirect_call;
1608   __bolt_ind_tailcall_counter_func_pointer = __bolt_instr_indirect_tailcall;
1609   // Conservatively reserve 100 MiB of shared pages
1610   GlobalAlloc.setMaxSize(0x6400000);
1611   GlobalAlloc.setShared(true);
1612   GlobalWriteProfileMutex = new (GlobalAlloc, 0) Mutex();
1613   if (__bolt_instr_num_ind_calls > 0)
1614     GlobalIndCallCounters =
1615         new (GlobalAlloc, 0) IndirectCallHashTable[__bolt_instr_num_ind_calls];
1616 
1617   if (__bolt_instr_sleep_time != 0) {
1618     // Move the instrumented process into its own process group
1619     if (__bolt_instr_wait_forks)
1620       __setpgid(0, 0);
1621 
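         // The parent returns and runs the instrumented program as usual; the
         // child becomes the watcher that periodically dumps (and optionally
         // clears) the counters.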
1622     if (long PID = __fork())
1623       return;
1624     watchProcess();
1625   }
1626 }
1627 
1628 extern "C" __attribute((force_align_arg_pointer)) void
1629 instrumentIndirectCall(uint64_t Target, uint64_t IndCallID) {
1630   GlobalIndCallCounters[IndCallID].incrementVal(Target, GlobalAlloc);
1631 }
1632 
1633 /// We receive as in-stack arguments the identifier of the indirect call site
1634 /// as well as the target address for the call
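     /// The 0xa0/0x98 offsets reach past the register area spilled by SAVE_ALL to
     /// those two in-stack arguments; the tail-call variant below uses offsets
     /// that are 8 bytes smaller, presumably because its stack layout lacks one
     /// return-address slot. (Exact offsets depend on SAVE_ALL in common.h.)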
1635 extern "C" __attribute((naked)) void __bolt_instr_indirect_call()
1636 {
1637   __asm__ __volatile__(SAVE_ALL
1638                        "mov 0xa0(%%rsp), %%rdi\n"
1639                        "mov 0x98(%%rsp), %%rsi\n"
1640                        "call instrumentIndirectCall\n"
1641                        RESTORE_ALL
1642                        "ret\n"
1643                        :::);
1644 }
1645 
1646 extern "C" __attribute((naked)) void __bolt_instr_indirect_tailcall()
1647 {
1648   __asm__ __volatile__(SAVE_ALL
1649                        "mov 0x98(%%rsp), %%rdi\n"
1650                        "mov 0x90(%%rsp), %%rsi\n"
1651                        "call instrumentIndirectCall\n"
1652                        RESTORE_ALL
1653                        "ret\n"
1654                        :::);
1655 }
1656 
1657 /// This hooks the ELF entry point; it needs to save all machine state.
1658 extern "C" __attribute((naked)) void __bolt_instr_start()
1659 {
1660   __asm__ __volatile__(SAVE_ALL
1661                        "call __bolt_instr_setup\n"
1662                        RESTORE_ALL
1663                        "jmp __bolt_start_trampoline\n"
1664                        :::);
1665 }
1666 
1667 /// This hooks into ELF's DT_FINI
1668 extern "C" void __bolt_instr_fini() {
1669   __bolt_fini_trampoline();
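       // When a sleep timer is in use, the watcher child owns profile dumping,
       // so only dump from FINI when no timer was requested.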
1670   if (__bolt_instr_sleep_time == 0)
1671     __bolt_instr_data_dump();
1672   DEBUG(report("Finished.\n"));
1673 }
1674 
1675 #endif
1676 
1677 #if defined(__APPLE__)
1678 
1679 extern "C" void __bolt_instr_data_dump() {
1680   ProfileWriterContext Ctx = readDescriptions();
1681 
1682   int FD = 2;
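       // FD 2 is stderr: on Apple the profile is currently just dumped there.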
1683   BumpPtrAllocator Alloc;
1684   const uint8_t *FuncDesc = Ctx.FuncDescriptions;
1685   uint32_t bolt_instr_num_funcs = _bolt_instr_num_funcs_getter();
1686 
1687   for (int I = 0, E = bolt_instr_num_funcs; I < E; ++I) {
1688     FuncDesc = writeFunctionProfile(FD, Ctx, FuncDesc, Alloc);
1689     Alloc.clear();
1690     DEBUG(reportNumber("FuncDesc now: ", (uint64_t)FuncDesc, 16));
1691   }
1692   assert(FuncDesc == (void *)Ctx.Strings,
1693          "FuncDesc ptr must be equal to stringtable");
1694 }
1695 
1696 // On OSX/iOS the final symbol name of an extern "C" function/variable contains
1697 // one extra leading underscore: _bolt_instr_setup -> __bolt_instr_setup.
1698 extern "C"
1699 __attribute__((section("__TEXT,__setup")))
1700 __attribute__((force_align_arg_pointer))
1701 void _bolt_instr_setup() {
1702   __asm__ __volatile__(SAVE_ALL :::);
1703 
1704   report("Hello!\n");
1705 
1706   __asm__ __volatile__(RESTORE_ALL :::);
1707 }
1708 
1709 extern "C"
1710 __attribute__((section("__TEXT,__fini")))
1711 __attribute__((force_align_arg_pointer))
1712 void _bolt_instr_fini() {
1713   report("Bye!\n");
1714   __bolt_instr_data_dump();
1715 }
1716 
1717 #endif
1718 #endif
1719