xref: /llvm-project/bolt/runtime/instr.cpp (revision 8b23a853b9fa50be8448ca54e68b2c40279e5a60)
1 //===- bolt/runtime/instr.cpp ---------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // BOLT runtime instrumentation library for x86 Linux. Currently, BOLT does
10 // not support linking modules with dependencies on one another into the final
11 // binary (TODO?), which means this library has to be self-contained in a single
12 // module.
13 //
14 // All extern declarations here need to be defined by BOLT itself. Those will be
15 // undefined symbols that BOLT needs to resolve by emitting these symbols with
16 // MCStreamer. Currently, Passes/Instrumentation.cpp is the pass responsible
17 // for defining the symbols here, and these two files are tightly coupled: one
18 // works statically when you run BOLT and the other during program runtime when
19 // you run an instrumented binary. The main goal here is to output an fdata file
20 // (BOLT profile) with the instrumentation counters inserted by the static pass.
21 // Counters for indirect calls are an exception, as we can't know them
22 // statically. These counters are created and managed here. To allow this, we
23 // need a minimal framework for allocating memory dynamically. We provide this
24 // with the BumpPtrAllocator class (not LLVM's, but our own version of it).
25 //
26 // Since this code is intended to be inserted into any executable, we decided to
27 // make it standalone and not depend on any external libraries (i.e. language
28 // support libraries, such as glibc or stdc++). To allow this, we provide a few
29 // light implementations of common OS-interacting functionality using direct
30 // syscall wrappers. Our simple allocator doesn't manage deallocations that
31 // fragment the memory space, so it's stack based. This is the minimal framework
32 // provided here to allow processing instrumented counters and writing fdata.
33 //
34 // In the C++ idiom used here, we never use or rely on constructors or
35 // destructors for global objects. That's because those need support from the
36 // linker in initialization/finalization code, and we want to keep our linker
37 // very simple. Similarly, we don't create any global objects that are zero
38 // initialized, since those would need to go to .bss, which our simple linker
39 // also doesn't support (TODO?).
40 //
41 //===----------------------------------------------------------------------===//
42 
43 #if defined (__x86_64__)
44 #include "common.h"
45 
46 // Enables very verbose logging to stderr, useful when debugging
47 //#define ENABLE_DEBUG
48 
49 #ifdef ENABLE_DEBUG
50 #define DEBUG(X)                                                               \
51   { X; }
52 #else
53 #define DEBUG(X)                                                               \
54   {}
55 #endif
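// Usage sketch (illustrative): wrap debug-only statements so they compile away
// when ENABLE_DEBUG is off, e.g. DEBUG(reportNumber("counters: ", NumCounters, 10));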
56 
57 #pragma GCC visibility push(hidden)
58 
59 extern "C" {
60 
61 #if defined(__APPLE__)
62 extern uint64_t* _bolt_instr_locations_getter();
63 extern uint32_t _bolt_num_counters_getter();
64 
65 extern uint8_t* _bolt_instr_tables_getter();
66 extern uint32_t _bolt_instr_num_funcs_getter();
67 
68 #else
69 
70 // Main counters inserted by instrumentation, incremented during runtime when
71 // points of interest (locations) in the program are reached. Those are direct
72 // calls and direct and indirect branches (local ones). There are also counters
73 // for the execution of basic blocks that are spanning tree leaves and need to be
74 // counted in order to infer the execution count of other edges of the CFG.
75 extern uint64_t __bolt_instr_locations[];
76 extern uint32_t __bolt_num_counters;
77 // Descriptions are serialized metadata about binary functions written by BOLT,
78 // so we have a minimal understanding of the program structure. For a
79 // reference on the exact format of this metadata, see *Description structs,
80 // Location, InstrumentedNode and EntryNode.
81 // Number of indirect call site descriptions
82 extern uint32_t __bolt_instr_num_ind_calls;
83 // Number of indirect call target descriptions
84 extern uint32_t __bolt_instr_num_ind_targets;
85 // Number of function descriptions
86 extern uint32_t __bolt_instr_num_funcs;
87 // Time to sleep across dumps (when we write the fdata profile to disk)
88 extern uint32_t __bolt_instr_sleep_time;
89 // Do not clear counters across dumps; rewrite the file with the updated values
90 extern bool __bolt_instr_no_counters_clear;
91 // Wait until all forks of the instrumented process finish
92 extern bool __bolt_instr_wait_forks;
93 // Filename to dump data to
94 extern char __bolt_instr_filename[];
95 // Instrumented binary file path
96 extern char __bolt_instr_binpath[];
97 // If true, append current PID to the fdata filename when creating it so
98 // different invocations of the same program can be differentiated.
99 extern bool __bolt_instr_use_pid;
100 // Functions that will be used to instrument indirect calls. The BOLT static
101 // pass identifies indirect calls and modifies them to load the address stored
102 // in these pointers and call it instead. BOLT can't use direct calls to
103 // our handlers because our addresses here are not known at analysis time. We
104 // only support resolving dependencies from this file to the output of BOLT,
105 // *not* the other way around.
106 // TODO: We need better linking support to make that happen.
107 extern void (*__bolt_ind_call_counter_func_pointer)();
108 extern void (*__bolt_ind_tailcall_counter_func_pointer)();
109 // Function pointers to init/fini trampoline routines in the binary, so we can
110 // resume regular execution of these functions that we hooked
111 extern void __bolt_start_trampoline();
112 extern void __bolt_fini_trampoline();
113 
114 #endif
115 }
116 
117 namespace {
118 
119 /// A simple allocator that mmaps a fixed size region and manages this space
120 /// in a stack fashion, meaning you always deallocate the last element that
121 /// was allocated. In practice, we don't need to deallocate individual elements.
122 /// We monotonically increase our usage and then deallocate everything once we
123 /// are done processing something.
124 class BumpPtrAllocator {
125   /// This is written before each allocation and acts as a canary to detect when
126   /// a bug caused our program to cross allocation boundaries.
127   struct EntryMetadata {
128     uint64_t Magic;
129     uint64_t AllocSize;
130   };
131 
132 public:
133   void *allocate(size_t Size) {
134     Lock L(M);
135 
136     if (StackBase == nullptr) {
137       StackBase = reinterpret_cast<uint8_t *>(
138           __mmap(0, MaxSize, PROT_READ | PROT_WRITE,
139                  (Shared ? MAP_SHARED : MAP_PRIVATE) | MAP_ANONYMOUS, -1, 0));
140       StackSize = 0;
141     }
142 
143     Size = alignTo(Size + sizeof(EntryMetadata), 16);
144     uint8_t *AllocAddress = StackBase + StackSize + sizeof(EntryMetadata);
145     auto *M = reinterpret_cast<EntryMetadata *>(StackBase + StackSize);
146     M->Magic = Magic;
147     M->AllocSize = Size;
148     StackSize += Size;
149     assert(StackSize < MaxSize, "allocator ran out of memory");
150     return AllocAddress;
151   }
152 
153 #ifdef DEBUG
154   /// Element-wise deallocation is only used for debugging to catch memory
155   /// bugs by checking magic bytes. Ordinarily, we reset the allocator once
156   /// we are done with it. Reset is done with clear(). There's no need
157   /// to deallocate each element individually.
158   void deallocate(void *Ptr) {
159     Lock L(M);
160     uint8_t MetadataOffset = sizeof(EntryMetadata);
161     auto *M = reinterpret_cast<EntryMetadata *>(
162         reinterpret_cast<uint8_t *>(Ptr) - MetadataOffset);
163     const uint8_t *StackTop = StackBase + StackSize + MetadataOffset;
164     // Validate size
165     if (Ptr != StackTop - M->AllocSize) {
166       // Failed validation, check if it is a pointer returned by operator new []
167       MetadataOffset +=
168           sizeof(uint64_t); // Space for number of elements alloc'ed
169       M = reinterpret_cast<EntryMetadata *>(reinterpret_cast<uint8_t *>(Ptr) -
170                                             MetadataOffset);
171       // Ok, it failed both checks if this assertion fails. Stop the program, we
172       // have a memory bug.
173       assert(Ptr == StackTop - M->AllocSize,
174              "must deallocate the last element alloc'ed");
175     }
176     assert(M->Magic == Magic, "allocator magic is corrupt");
177     StackSize -= M->AllocSize;
178   }
179 #else
180   void deallocate(void *) {}
181 #endif
182 
183   void clear() {
184     Lock L(M);
185     StackSize = 0;
186   }
187 
188   /// Set mmap reservation size (only relevant before first allocation)
189   void setMaxSize(uint64_t Size) { MaxSize = Size; }
190 
191   /// Set mmap reservation privacy (only relevant before first allocation)
192   void setShared(bool S) { Shared = S; }
193 
194   void destroy() {
195     if (StackBase == nullptr)
196       return;
197     __munmap(StackBase, MaxSize);
198   }
199 
200 private:
201   static constexpr uint64_t Magic = 0x1122334455667788ull;
202   uint64_t MaxSize = 0xa00000;
203   uint8_t *StackBase{nullptr};
204   uint64_t StackSize{0};
205   bool Shared{false};
206   Mutex M;
207 };
208 
209 /// Used for allocating indirect call instrumentation counters. Initialized by
210 /// __bolt_instr_setup, our initialization routine.
211 BumpPtrAllocator GlobalAlloc;
212 } // anonymous namespace
213 
214 // User-defined placement new operators. We only use those (as opposed to
215 // overriding the regular operator new) so we can keep our allocator on the
216 // stack instead of in a data section (global).
217 void *operator new(size_t Sz, BumpPtrAllocator &A) { return A.allocate(Sz); }
218 void *operator new(size_t Sz, BumpPtrAllocator &A, char C) {
219   auto *Ptr = reinterpret_cast<char *>(A.allocate(Sz));
220   memset(Ptr, C, Sz);
221   return Ptr;
222 }
223 void *operator new[](size_t Sz, BumpPtrAllocator &A) {
224   return A.allocate(Sz);
225 }
226 void *operator new[](size_t Sz, BumpPtrAllocator &A, char C) {
227   auto *Ptr = reinterpret_cast<char *>(A.allocate(Sz));
228   memset(Ptr, C, Sz);
229   return Ptr;
230 }
231 // Only called during exception unwinding (useless). We must manually dealloc.
232 // C++ language weirdness
233 void operator delete(void *Ptr, BumpPtrAllocator &A) { A.deallocate(Ptr); }
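// Typical usage within this file (illustrative sketch): objects are created via
// placement new into a specific allocator and released wholesale, e.g.
//   BumpPtrAllocator Alloc;
//   auto *Freqs = new (Alloc, 0) uint64_t[NumEdges]; // zero-filled array
//   ...
//   Alloc.clear();   // drop all allocations at once
//   Alloc.destroy(); // unmap the underlying reservation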
234 
235 namespace {
236 
237 // Disable instrumentation optimizations that sacrifice profile accuracy
238 extern "C" bool __bolt_instr_conservative;
239 
240 /// Basic key-val atom stored in our hash
241 struct SimpleHashTableEntryBase {
242   uint64_t Key;
243   uint64_t Val;
244   void dump(const char *Msg = nullptr) {
245     // TODO: make some sort of formatting function
246     // Currently we have to do it the ugly way because
247     // we want every message to be printed atomically via a single call to
248     // __write. If we use reportNumber() and others multiple times, we'll get
249     // garbage in a multithreaded environment.
250     char Buf[BufSize];
251     char *Ptr = Buf;
252     Ptr = intToStr(Ptr, __getpid(), 10);
253     *Ptr++ = ':';
254     *Ptr++ = ' ';
255     if (Msg)
256       Ptr = strCopy(Ptr, Msg, strLen(Msg));
257     *Ptr++ = '0';
258     *Ptr++ = 'x';
259     Ptr = intToStr(Ptr, (uint64_t)this, 16);
260     *Ptr++ = ':';
261     *Ptr++ = ' ';
262     Ptr = strCopy(Ptr, "MapEntry(0x", sizeof("MapEntry(0x") - 1);
263     Ptr = intToStr(Ptr, Key, 16);
264     *Ptr++ = ',';
265     *Ptr++ = ' ';
266     *Ptr++ = '0';
267     *Ptr++ = 'x';
268     Ptr = intToStr(Ptr, Val, 16);
269     *Ptr++ = ')';
270     *Ptr++ = '\n';
271     assert(Ptr - Buf < BufSize, "Buffer overflow!");
272     // print everything all at once for atomicity
273     __write(2, Buf, Ptr - Buf);
274   }
275 };
276 
277 /// This hash table implementation starts by allocating a table of size
278 /// InitialSize. When conflicts happen in this main table, it resolves
279 /// them by chaining a new table of size IncSize. It never reallocs as our
280 /// allocator doesn't support it. The key is intended to be function pointers.
281 /// There's no clever hash function (it's just x mod size, size being prime).
282 /// I never tuned the coefficientes in the modular equation (TODO)
283 /// This is used for indirect calls (each call site has one of this, so it
284 /// should have a small footprint) and for tallying call counts globally for
285 /// each target to check if we missed the origin of some calls (this one is a
286 /// large instantiation of this template, since it is global for all call sites)
287 template <typename T = SimpleHashTableEntryBase, uint32_t InitialSize = 7,
288           uint32_t IncSize = 7>
289 class SimpleHashTable {
290 public:
291   using MapEntry = T;
292 
293   /// Increment by 1 the value of \p Key. If it is not in this table, it will be
294   /// added to the table and its value set to 1.
295   void incrementVal(uint64_t Key, BumpPtrAllocator &Alloc) {
296     ++get(Key, Alloc).Val;
297   }
298 
299   /// Basic member accessing interface. Here we pass the allocator explicitly to
300   /// avoid storing a pointer to it as part of this table (remember there is one
301   /// hash for each indirect call site, so we want to minimize our footprint).
302   MapEntry &get(uint64_t Key, BumpPtrAllocator &Alloc) {
303     if (!__bolt_instr_conservative) {
304       TryLock L(M);
305       if (!L.isLocked())
306         return NoEntry;
307       return getOrAllocEntry(Key, Alloc);
308     }
309     Lock L(M);
310     return getOrAllocEntry(Key, Alloc);
311   }
312 
313   /// Traverses all elements in the table
314   template <typename... Args>
315   void forEachElement(void (*Callback)(MapEntry &, Args...), Args... args) {
316     Lock L(M);
317     if (!TableRoot)
318       return;
319     return forEachElement(Callback, InitialSize, TableRoot, args...);
320   }
321 
322   void resetCounters();
323 
324 private:
325   constexpr static uint64_t VacantMarker = 0;
326   constexpr static uint64_t FollowUpTableMarker = 0x8000000000000000ull;
327 
328   MapEntry *TableRoot{nullptr};
329   MapEntry NoEntry;
330   Mutex M;
331 
332   template <typename... Args>
333   void forEachElement(void (*Callback)(MapEntry &, Args...),
334                       uint32_t NumEntries, MapEntry *Entries, Args... args) {
335     for (uint32_t I = 0; I < NumEntries; ++I) {
336       MapEntry &Entry = Entries[I];
337       if (Entry.Key == VacantMarker)
338         continue;
339       if (Entry.Key & FollowUpTableMarker) {
340         forEachElement(Callback, IncSize,
341                        reinterpret_cast<MapEntry *>(Entry.Key &
342                                                     ~FollowUpTableMarker),
343                        args...);
344         continue;
345       }
346       Callback(Entry, args...);
347     }
348   }
349 
350   MapEntry &firstAllocation(uint64_t Key, BumpPtrAllocator &Alloc) {
351     TableRoot = new (Alloc, 0) MapEntry[InitialSize];
352     MapEntry &Entry = TableRoot[Key % InitialSize];
353     Entry.Key = Key;
354     // DEBUG(Entry.dump("Created root entry: "));
355     return Entry;
356   }
357 
358   MapEntry &getEntry(MapEntry *Entries, uint64_t Key, uint64_t Selector,
359                      BumpPtrAllocator &Alloc, int CurLevel) {
360     // DEBUG(reportNumber("getEntry called, level ", CurLevel, 10));
361     const uint32_t NumEntries = CurLevel == 0 ? InitialSize : IncSize;
362     uint64_t Remainder = Selector / NumEntries;
363     Selector = Selector % NumEntries;
364     MapEntry &Entry = Entries[Selector];
365 
366     // A hit
367     if (Entry.Key == Key) {
368       // DEBUG(Entry.dump("Hit: "));
369       return Entry;
370     }
371 
372     // Vacant - add new entry
373     if (Entry.Key == VacantMarker) {
374       Entry.Key = Key;
375       // DEBUG(Entry.dump("Adding new entry: "));
376       return Entry;
377     }
378 
379     // Defer to the next level
380     if (Entry.Key & FollowUpTableMarker) {
381       return getEntry(
382           reinterpret_cast<MapEntry *>(Entry.Key & ~FollowUpTableMarker),
383           Key, Remainder, Alloc, CurLevel + 1);
384     }
385 
386     // Conflict - create the next level
387     // DEBUG(Entry.dump("Creating new level: "));
388 
389     MapEntry *NextLevelTbl = new (Alloc, 0) MapEntry[IncSize];
390     // DEBUG(
391     //     reportNumber("Newly allocated level: 0x", uint64_t(NextLevelTbl),
392     //     16));
393     uint64_t CurEntrySelector = Entry.Key / InitialSize;
394     for (int I = 0; I < CurLevel; ++I)
395       CurEntrySelector /= IncSize;
396     CurEntrySelector = CurEntrySelector % IncSize;
397     NextLevelTbl[CurEntrySelector] = Entry;
398     Entry.Key = reinterpret_cast<uint64_t>(NextLevelTbl) | FollowUpTableMarker;
399     assert((NextLevelTbl[CurEntrySelector].Key & ~FollowUpTableMarker) !=
400                uint64_t(Entries),
401            "circular reference created!\n");
402     // DEBUG(NextLevelTbl[CurEntrySelector].dump("New level entry: "));
403     // DEBUG(Entry.dump("Updated old entry: "));
404     return getEntry(NextLevelTbl, Key, Remainder, Alloc, CurLevel + 1);
405   }
406 
407   MapEntry &getOrAllocEntry(uint64_t Key, BumpPtrAllocator &Alloc) {
408     if (TableRoot)
409       return getEntry(TableRoot, Key, Key, Alloc, 0);
410     return firstAllocation(Key, Alloc);
411   }
412 };
413 
414 template <typename T> void resetIndCallCounter(T &Entry) {
415   Entry.Val = 0;
416 }
417 
418 template <typename T, uint32_t X, uint32_t Y>
419 void SimpleHashTable<T, X, Y>::resetCounters() {
420   forEachElement(resetIndCallCounter);
421 }
422 
423 /// Represents a hash table mapping a function target address to its counter.
424 using IndirectCallHashTable = SimpleHashTable<>;
425 
426 /// Initialize with number 1 instead of 0 so we don't go into .bss. This is the
427 /// global array of all hash tables storing indirect call destinations happening
428 /// during runtime, one table per call site.
429 IndirectCallHashTable *GlobalIndCallCounters{
430     reinterpret_cast<IndirectCallHashTable *>(1)};
431 
432 /// Don't allow reentrancy in the fdata writing phase - only one thread writes
433 /// it
434 Mutex *GlobalWriteProfileMutex{reinterpret_cast<Mutex *>(1)};
435 
436 /// Store number of calls in addition to the target address (Key) and frequency
437 /// as perceived by the basic block counter (Val).
438 struct CallFlowEntryBase : public SimpleHashTableEntryBase {
439   uint64_t Calls;
440 };
441 
442 using CallFlowHashTableBase = SimpleHashTable<CallFlowEntryBase, 11939, 233>;
443 
444 /// This is a large table indexing all possible call targets (indirect and
445 /// direct ones). The goal is to find mismatches between number of calls (for
446 /// those calls we were able to track) and the entry basic block counter of the
447 /// callee. In most cases, these two should be equal. If not, there are two
448 /// possible scenarios here:
449 ///
450 ///  * Entry BB has higher frequency than all known calls to this function.
451 ///    In this case, we have dynamic library code or any uninstrumented code
452 ///    calling this function. We will write the profile for these untracked
453 ///    calls as having source "0 [unknown] 0" in the fdata file.
454 ///
455 ///  * Number of known calls is higher than the frequency of entry BB
456 ///    This only happens when there is no counter for the entry BB / callee
457 ///    function is not simple (in BOLT terms). We don't do anything special
458 ///    here and just ignore those (we still report all calls to the non-simple
459 ///    function, though).
460 ///
461 class CallFlowHashTable : public CallFlowHashTableBase {
462 public:
463   CallFlowHashTable(BumpPtrAllocator &Alloc) : Alloc(Alloc) {}
464 
465   MapEntry &get(uint64_t Key) { return CallFlowHashTableBase::get(Key, Alloc); }
466 
467 private:
468   // Unlike the hash table for indirect call targets, we do store the
469   // allocator here since there is only one call flow hash and space overhead
470   // is negligible.
471   BumpPtrAllocator &Alloc;
472 };
473 
474 ///
475 /// Description metadata emitted by BOLT to describe the program - refer to
476 /// Passes/Instrumentation.cpp - Instrumentation::emitTablesAsELFNote()
477 ///
478 struct Location {
479   uint32_t FunctionName;
480   uint32_t Offset;
481 };
482 
483 struct CallDescription {
484   Location From;
485   uint32_t FromNode;
486   Location To;
487   uint32_t Counter;
488   uint64_t TargetAddress;
489 };
490 
491 using IndCallDescription = Location;
492 
493 struct IndCallTargetDescription {
494   Location Loc;
495   uint64_t Address;
496 };
497 
498 struct EdgeDescription {
499   Location From;
500   uint32_t FromNode;
501   Location To;
502   uint32_t ToNode;
503   uint32_t Counter;
504 };
505 
506 struct InstrumentedNode {
507   uint32_t Node;
508   uint32_t Counter;
509 };
510 
511 struct EntryNode {
512   uint64_t Node;
513   uint64_t Address;
514 };
515 
516 struct FunctionDescription {
517   uint32_t NumLeafNodes;
518   const InstrumentedNode *LeafNodes;
519   uint32_t NumEdges;
520   const EdgeDescription *Edges;
521   uint32_t NumCalls;
522   const CallDescription *Calls;
523   uint32_t NumEntryNodes;
524   const EntryNode *EntryNodes;
525 
526   /// Constructor will parse the serialized function metadata written by BOLT
527   FunctionDescription(const uint8_t *FuncDesc);
528 
529   uint64_t getSize() const {
530     return 16 + NumLeafNodes * sizeof(InstrumentedNode) +
531            NumEdges * sizeof(EdgeDescription) +
532            NumCalls * sizeof(CallDescription) +
533            NumEntryNodes * sizeof(EntryNode);
534   }
535 };
536 
537 /// The context is created when the fdata profile needs to be written to disk
538 /// and we need to interpret our runtime counters. It contains pointers to the
539 /// mmaped binary (only the BOLT written metadata section). Deserialization
540 /// should be straightforward as most data is POD or an array of POD elements.
541 /// This metadata is used to reconstruct function CFGs.
542 struct ProfileWriterContext {
543   IndCallDescription *IndCallDescriptions;
544   IndCallTargetDescription *IndCallTargets;
545   uint8_t *FuncDescriptions;
546   char *Strings;  // String table with function names used in this binary
547   int FileDesc;   // File descriptor for the file on disk backing this
548                   // information in memory via mmap
549   void *MMapPtr;  // The mmap ptr
550   int MMapSize;   // The mmap size
551 
552   /// Hash table storing all possible call destinations to detect untracked
553   /// calls and correctly report them as [unknown] in output fdata.
554   CallFlowHashTable *CallFlowTable;
555 
556   /// Look up the sorted indirect call target vector to fetch function name and
557   /// offset for an arbitrary function pointer.
558   const IndCallTargetDescription *lookupIndCallTarget(uint64_t Target) const;
559 };
560 
561 /// Performs a string comparison and returns zero if Str1 matches Str2. Compares
562 /// at most Size characters.
563 int compareStr(const char *Str1, const char *Str2, int Size) {
564   while (*Str1 == *Str2) {
565     if (*Str1 == '\0' || --Size == 0)
566       return 0;
567     ++Str1;
568     ++Str2;
569   }
570   return 1;
571 }
572 
573 /// Output Location to the fdata file
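/// An illustrative emitted fragment is "1 main 1b " for a location at offset
/// 0x1b inside the function named "main" (type 1 denotes a regular symbol).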
574 char *serializeLoc(const ProfileWriterContext &Ctx, char *OutBuf,
575                    const Location Loc, uint32_t BufSize) {
576   // fdata location format: Type Name Offset
577   // Type 1 - regular symbol
578   OutBuf = strCopy(OutBuf, "1 ");
579   const char *Str = Ctx.Strings + Loc.FunctionName;
580   uint32_t Size = 25;
581   while (*Str) {
582     *OutBuf++ = *Str++;
583     if (++Size >= BufSize)
584       break;
585   }
586   assert(!*Str, "buffer overflow, function name too large");
587   *OutBuf++ = ' ';
588   OutBuf = intToStr(OutBuf, Loc.Offset, 16);
589   *OutBuf++ = ' ';
590   return OutBuf;
591 }
592 
593 /// Read and deserialize a function description written by BOLT. \p FuncDesc
594 /// points at the beginning of the function metadata structure in the file.
595 /// See Instrumentation::emitTablesAsELFNote()
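/// The serialized layout parsed below is, in order:
///   uint32_t NumLeafNodes;   InstrumentedNode LeafNodes[NumLeafNodes];
///   uint32_t NumEdges;       EdgeDescription  Edges[NumEdges];
///   uint32_t NumCalls;       CallDescription  Calls[NumCalls];
///   uint32_t NumEntryNodes;  EntryNode        EntryNodes[NumEntryNodes];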
596 FunctionDescription::FunctionDescription(const uint8_t *FuncDesc) {
597   NumLeafNodes = *reinterpret_cast<const uint32_t *>(FuncDesc);
598   DEBUG(reportNumber("NumLeafNodes = ", NumLeafNodes, 10));
599   LeafNodes = reinterpret_cast<const InstrumentedNode *>(FuncDesc + 4);
600 
601   NumEdges = *reinterpret_cast<const uint32_t *>(
602       FuncDesc + 4 + NumLeafNodes * sizeof(InstrumentedNode));
603   DEBUG(reportNumber("NumEdges = ", NumEdges, 10));
604   Edges = reinterpret_cast<const EdgeDescription *>(
605       FuncDesc + 8 + NumLeafNodes * sizeof(InstrumentedNode));
606 
607   NumCalls = *reinterpret_cast<const uint32_t *>(
608       FuncDesc + 8 + NumLeafNodes * sizeof(InstrumentedNode) +
609       NumEdges * sizeof(EdgeDescription));
610   DEBUG(reportNumber("NumCalls = ", NumCalls, 10));
611   Calls = reinterpret_cast<const CallDescription *>(
612       FuncDesc + 12 + NumLeafNodes * sizeof(InstrumentedNode) +
613       NumEdges * sizeof(EdgeDescription));
614   NumEntryNodes = *reinterpret_cast<const uint32_t *>(
615       FuncDesc + 12 + NumLeafNodes * sizeof(InstrumentedNode) +
616       NumEdges * sizeof(EdgeDescription) + NumCalls * sizeof(CallDescription));
617   DEBUG(reportNumber("NumEntryNodes = ", NumEntryNodes, 10));
618   EntryNodes = reinterpret_cast<const EntryNode *>(
619       FuncDesc + 16 + NumLeafNodes * sizeof(InstrumentedNode) +
620       NumEdges * sizeof(EdgeDescription) + NumCalls * sizeof(CallDescription));
621 }
622 
623 /// Read and mmap descriptions written by BOLT from the executable's notes
624 /// section
625 #if defined(HAVE_ELF_H) and !defined(__APPLE__)
626 
627 void *__attribute__((noinline)) __get_pc() {
628   return __builtin_extract_return_addr(__builtin_return_address(0));
629 }
630 
631 /// Parse an address range string into the hex pair <StartAddress, EndAddress>
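/// An illustrative input is "5589a63d2000-5589a63d3000", the entry name format
/// used under /proc/self/map_files.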
632 bool parseAddressRange(const char *Str, uint64_t &StartAddress,
633                        uint64_t &EndAddress) {
634   if (!Str)
635     return false;
636   // Parsed string format: <hex1>-<hex2>
637   StartAddress = hexToLong(Str, '-');
638   while (*Str && *Str != '-')
639     ++Str;
640   if (!*Str)
641     return false;
642   ++Str; // swallow '-'
643   EndAddress = hexToLong(Str);
644   return true;
645 }
646 
647 /// Get the full path to the real binary by taking our current virtual address
648 /// and searching /proc/self/map_files for the link whose address range
649 /// contains it
650 static char *getBinaryPath() {
651   const uint32_t BufSize = 1024;
652   const uint32_t NameMax = 4096;
653   const char DirPath[] = "/proc/self/map_files/";
654   static char TargetPath[NameMax] = {};
655   char Buf[BufSize];
656 
657   if (__bolt_instr_binpath[0] != '\0')
658     return __bolt_instr_binpath;
659 
660   if (TargetPath[0] != '\0')
661     return TargetPath;
662 
663   unsigned long CurAddr = (unsigned long)__get_pc();
664   uint64_t FDdir = __open(DirPath,
665                           /*flags=*/0 /*O_RDONLY*/,
666                           /*mode=*/0666);
667   assert(static_cast<int64_t>(FDdir) >= 0,
668          "failed to open /proc/self/map_files");
669 
670   while (long Nread = __getdents(FDdir, (struct dirent *)Buf, BufSize)) {
671     assert(static_cast<int64_t>(Nread) != -1, "failed to get folder entries");
672 
673     struct dirent *d;
674     for (long Bpos = 0; Bpos < Nread; Bpos += d->d_reclen) {
675       d = (struct dirent *)(Buf + Bpos);
676 
677       uint64_t StartAddress, EndAddress;
678       if (!parseAddressRange(d->d_name, StartAddress, EndAddress))
679         continue;
680       if (CurAddr < StartAddress || CurAddr > EndAddress)
681         continue;
682       char FindBuf[NameMax];
683       char *C = strCopy(FindBuf, DirPath, NameMax);
684       C = strCopy(C, d->d_name, NameMax - (C - FindBuf));
685       *C = '\0';
686       uint32_t Ret = __readlink(FindBuf, TargetPath, sizeof(TargetPath));
687       assert(Ret != -1 && Ret != BufSize, "readlink error");
688       TargetPath[Ret] = '\0';
689       return TargetPath;
690     }
691   }
692   return nullptr;
693 }
694 
695 ProfileWriterContext readDescriptions() {
696   ProfileWriterContext Result;
697   char *BinPath = getBinaryPath();
698   assert(BinPath && BinPath[0] != '\0', "failed to find binary path");
699 
700   uint64_t FD = __open(BinPath,
701                        /*flags=*/0 /*O_RDONLY*/,
702                        /*mode=*/0666);
703   assert(static_cast<int64_t>(FD) >= 0, "failed to open binary path");
704 
705   Result.FileDesc = FD;
706 
707   // mmap our binary to memory
708   uint64_t Size = __lseek(FD, 0, 2 /*SEEK_END*/);
709   uint8_t *BinContents = reinterpret_cast<uint8_t *>(
710       __mmap(0, Size, PROT_READ, MAP_PRIVATE, FD, 0));
711   Result.MMapPtr = BinContents;
712   Result.MMapSize = Size;
713   Elf64_Ehdr *Hdr = reinterpret_cast<Elf64_Ehdr *>(BinContents);
714   Elf64_Shdr *Shdr = reinterpret_cast<Elf64_Shdr *>(BinContents + Hdr->e_shoff);
715   Elf64_Shdr *StringTblHeader = reinterpret_cast<Elf64_Shdr *>(
716       BinContents + Hdr->e_shoff + Hdr->e_shstrndx * Hdr->e_shentsize);
717 
718   // Find .bolt.instr.tables with the data we need and set pointers to it
719   for (int I = 0; I < Hdr->e_shnum; ++I) {
720     char *SecName = reinterpret_cast<char *>(
721         BinContents + StringTblHeader->sh_offset + Shdr->sh_name);
722     if (compareStr(SecName, ".bolt.instr.tables", 64) != 0) {
723       Shdr = reinterpret_cast<Elf64_Shdr *>(BinContents + Hdr->e_shoff +
724                                             (I + 1) * Hdr->e_shentsize);
725       continue;
726     }
727     // Actual contents of the ELF note start after offset 20 decimal:
728     // Offset 0: Producer name size (4 bytes)
729     // Offset 4: Contents size (4 bytes)
730     // Offset 8: Note type (4 bytes)
731     // Offset 12: Producer name (BOLT\0) (5 bytes + align to 4-byte boundary)
732     // Offset 20: Contents
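    // The contents are three size-prefixed blobs followed by the string table
    // (offsets below are relative to sh_offset, matching the reads that follow):
    //   Offset 20: IndCallDescSize (4 bytes); IndCallDescriptions at offset 24
    //   Offset 24 + IndCallDescSize: IndCallTargetDescSize (4 bytes);
    //       IndCallTargets at offset 28 + IndCallDescSize
    //   Offset 28 + both sizes: FuncDescSize (4 bytes); FuncDescriptions at
    //       offset 32 + both sizes
    //   Offset 32 + all three sizes: Strings (function name table)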
733     uint32_t IndCallDescSize =
734         *reinterpret_cast<uint32_t *>(BinContents + Shdr->sh_offset + 20);
735     uint32_t IndCallTargetDescSize = *reinterpret_cast<uint32_t *>(
736         BinContents + Shdr->sh_offset + 24 + IndCallDescSize);
737     uint32_t FuncDescSize =
738         *reinterpret_cast<uint32_t *>(BinContents + Shdr->sh_offset + 28 +
739                                       IndCallDescSize + IndCallTargetDescSize);
740     Result.IndCallDescriptions = reinterpret_cast<IndCallDescription *>(
741         BinContents + Shdr->sh_offset + 24);
742     Result.IndCallTargets = reinterpret_cast<IndCallTargetDescription *>(
743         BinContents + Shdr->sh_offset + 28 + IndCallDescSize);
744     Result.FuncDescriptions = BinContents + Shdr->sh_offset + 32 +
745                               IndCallDescSize + IndCallTargetDescSize;
746     Result.Strings = reinterpret_cast<char *>(
747         BinContents + Shdr->sh_offset + 32 + IndCallDescSize +
748         IndCallTargetDescSize + FuncDescSize);
749     return Result;
750   }
751   const char ErrMsg[] =
752       "BOLT instrumentation runtime error: could not find section "
753       ".bolt.instr.tables\n";
754   reportError(ErrMsg, sizeof(ErrMsg));
755   return Result;
756 }
757 
758 #else
759 
760 ProfileWriterContext readDescriptions() {
761   ProfileWriterContext Result;
762   uint8_t *Tables = _bolt_instr_tables_getter();
763   uint32_t IndCallDescSize = *reinterpret_cast<uint32_t *>(Tables);
764   uint32_t IndCallTargetDescSize =
765       *reinterpret_cast<uint32_t *>(Tables + 4 + IndCallDescSize);
766   uint32_t FuncDescSize = *reinterpret_cast<uint32_t *>(
767       Tables + 8 + IndCallDescSize + IndCallTargetDescSize);
768   Result.IndCallDescriptions =
769       reinterpret_cast<IndCallDescription *>(Tables + 4);
770   Result.IndCallTargets = reinterpret_cast<IndCallTargetDescription *>(
771       Tables + 8 + IndCallDescSize);
772   Result.FuncDescriptions =
773       Tables + 12 + IndCallDescSize + IndCallTargetDescSize;
774   Result.Strings = reinterpret_cast<char *>(
775       Tables + 12 + IndCallDescSize + IndCallTargetDescSize + FuncDescSize);
776   return Result;
777 }
778 
779 #endif
780 
781 #if !defined(__APPLE__)
782 /// Debug helper that prints overall metadata global numbers to check they are sane
783 void printStats(const ProfileWriterContext &Ctx) {
784   char StatMsg[BufSize];
785   char *StatPtr = StatMsg;
786   StatPtr =
787       strCopy(StatPtr,
788               "\nBOLT INSTRUMENTATION RUNTIME STATISTICS\n\nIndCallDescSize: ");
789   StatPtr = intToStr(StatPtr,
790                      Ctx.FuncDescriptions -
791                          reinterpret_cast<uint8_t *>(Ctx.IndCallDescriptions),
792                      10);
793   StatPtr = strCopy(StatPtr, "\nFuncDescSize: ");
794   StatPtr = intToStr(
795       StatPtr,
796       reinterpret_cast<uint8_t *>(Ctx.Strings) - Ctx.FuncDescriptions, 10);
797   StatPtr = strCopy(StatPtr, "\n__bolt_instr_num_ind_calls: ");
798   StatPtr = intToStr(StatPtr, __bolt_instr_num_ind_calls, 10);
799   StatPtr = strCopy(StatPtr, "\n__bolt_instr_num_funcs: ");
800   StatPtr = intToStr(StatPtr, __bolt_instr_num_funcs, 10);
801   StatPtr = strCopy(StatPtr, "\n");
802   __write(2, StatMsg, StatPtr - StatMsg);
803 }
804 #endif
805 
806 
807 /// This is part of a simple CFG representation in memory, where we store
808 /// a dynamically sized array of input and output edges per node, and store
809 /// a dynamically sized array of nodes per graph. We also store the spanning
810 /// tree edges for that CFG in a separate array of nodes in
811 /// \p SpanningTreeNodes, while the regular nodes live in \p CFGNodes.
812 struct Edge {
813   uint32_t Node; // Index in the nodes array of this edge's destination
814   uint32_t ID;   // Edge index in an array comprising all edges of the graph
815 };
816 
817 /// A regular graph node or a spanning tree node
818 struct Node {
819   uint32_t NumInEdges{0};  // Input edge count used to size InEdge
820   uint32_t NumOutEdges{0}; // Output edge count used to size OutEdges
821   Edge *InEdges{nullptr};  // Created and managed by \p Graph
822   Edge *OutEdges{nullptr}; // ditto
823 };
824 
825 /// Main class for CFG representation in memory. Manages object creation and
826 /// destruction, populates an array of CFG nodes as well as corresponding
827 /// spanning tree nodes.
828 struct Graph {
829   uint32_t NumNodes;
830   Node *CFGNodes;
831   Node *SpanningTreeNodes;
832   uint64_t *EdgeFreqs;
833   uint64_t *CallFreqs;
834   BumpPtrAllocator &Alloc;
835   const FunctionDescription &D;
836 
837   /// Reads a list of edges from function description \p D and builds
838   /// the graph from it. Allocates several internal dynamic structures that are
839   /// later destroyed by ~Graph() and uses \p Alloc. D.LeafNodes contains all
840   /// spanning tree leaf node descriptions (their counters). They are the seed
841   /// used to compute the rest of the missing edge counts in a bottom-up
842   /// traversal of the spanning tree.
843   Graph(BumpPtrAllocator &Alloc, const FunctionDescription &D,
844         const uint64_t *Counters, ProfileWriterContext &Ctx);
845   ~Graph();
846   void dump() const;
847 
848 private:
849   void computeEdgeFrequencies(const uint64_t *Counters,
850                               ProfileWriterContext &Ctx);
851   void dumpEdgeFreqs() const;
852 };
853 
854 Graph::Graph(BumpPtrAllocator &Alloc, const FunctionDescription &D,
855              const uint64_t *Counters, ProfileWriterContext &Ctx)
856     : Alloc(Alloc), D(D) {
857   DEBUG(reportNumber("G = 0x", (uint64_t)this, 16));
858   // First pass to determine number of nodes
859   int32_t MaxNodes = -1;
860   CallFreqs = nullptr;
861   EdgeFreqs = nullptr;
862   for (int I = 0; I < D.NumEdges; ++I) {
863     if (static_cast<int32_t>(D.Edges[I].FromNode) > MaxNodes)
864       MaxNodes = D.Edges[I].FromNode;
865     if (static_cast<int32_t>(D.Edges[I].ToNode) > MaxNodes)
866       MaxNodes = D.Edges[I].ToNode;
867   }
868 
869   for (int I = 0; I < D.NumLeafNodes; ++I)
870     if (static_cast<int32_t>(D.LeafNodes[I].Node) > MaxNodes)
871       MaxNodes = D.LeafNodes[I].Node;
872 
873   for (int I = 0; I < D.NumCalls; ++I)
874     if (static_cast<int32_t>(D.Calls[I].FromNode) > MaxNodes)
875       MaxNodes = D.Calls[I].FromNode;
876 
877   // No nodes? Nothing to do
878   if (MaxNodes < 0) {
879     DEBUG(report("No nodes!\n"));
880     CFGNodes = nullptr;
881     SpanningTreeNodes = nullptr;
882     NumNodes = 0;
883     return;
884   }
885   ++MaxNodes;
886   DEBUG(reportNumber("NumNodes = ", MaxNodes, 10));
887   NumNodes = static_cast<uint32_t>(MaxNodes);
888 
889   // Initial allocations
890   CFGNodes = new (Alloc) Node[MaxNodes];
891 
892   DEBUG(reportNumber("G->CFGNodes = 0x", (uint64_t)CFGNodes, 16));
893   SpanningTreeNodes = new (Alloc) Node[MaxNodes];
894   DEBUG(reportNumber("G->SpanningTreeNodes = 0x",
895                      (uint64_t)SpanningTreeNodes, 16));
896 
897   // Figure out how much to allocate to each vector (in/out edge sets)
898   for (int I = 0; I < D.NumEdges; ++I) {
899     CFGNodes[D.Edges[I].FromNode].NumOutEdges++;
900     CFGNodes[D.Edges[I].ToNode].NumInEdges++;
901     if (D.Edges[I].Counter != 0xffffffff)
902       continue;
903 
904     SpanningTreeNodes[D.Edges[I].FromNode].NumOutEdges++;
905     SpanningTreeNodes[D.Edges[I].ToNode].NumInEdges++;
906   }
907 
908   // Allocate in/out edge sets
909   for (int I = 0; I < MaxNodes; ++I) {
910     if (CFGNodes[I].NumInEdges > 0)
911       CFGNodes[I].InEdges = new (Alloc) Edge[CFGNodes[I].NumInEdges];
912     if (CFGNodes[I].NumOutEdges > 0)
913       CFGNodes[I].OutEdges = new (Alloc) Edge[CFGNodes[I].NumOutEdges];
914     if (SpanningTreeNodes[I].NumInEdges > 0)
915       SpanningTreeNodes[I].InEdges =
916           new (Alloc) Edge[SpanningTreeNodes[I].NumInEdges];
917     if (SpanningTreeNodes[I].NumOutEdges > 0)
918       SpanningTreeNodes[I].OutEdges =
919           new (Alloc) Edge[SpanningTreeNodes[I].NumOutEdges];
920     CFGNodes[I].NumInEdges = 0;
921     CFGNodes[I].NumOutEdges = 0;
922     SpanningTreeNodes[I].NumInEdges = 0;
923     SpanningTreeNodes[I].NumOutEdges = 0;
924   }
925 
926   // Fill in/out edge sets
927   for (int I = 0; I < D.NumEdges; ++I) {
928     const uint32_t Src = D.Edges[I].FromNode;
929     const uint32_t Dst = D.Edges[I].ToNode;
930     Edge *E = &CFGNodes[Src].OutEdges[CFGNodes[Src].NumOutEdges++];
931     E->Node = Dst;
932     E->ID = I;
933 
934     E = &CFGNodes[Dst].InEdges[CFGNodes[Dst].NumInEdges++];
935     E->Node = Src;
936     E->ID = I;
937 
938     if (D.Edges[I].Counter != 0xffffffff)
939       continue;
940 
941     E = &SpanningTreeNodes[Src]
942              .OutEdges[SpanningTreeNodes[Src].NumOutEdges++];
943     E->Node = Dst;
944     E->ID = I;
945 
946     E = &SpanningTreeNodes[Dst]
947              .InEdges[SpanningTreeNodes[Dst].NumInEdges++];
948     E->Node = Src;
949     E->ID = I;
950   }
951 
952   computeEdgeFrequencies(Counters, Ctx);
953 }
954 
955 Graph::~Graph() {
956   if (CallFreqs)
957     Alloc.deallocate(CallFreqs);
958   if (EdgeFreqs)
959     Alloc.deallocate(EdgeFreqs);
960   for (int I = NumNodes - 1; I >= 0; --I) {
961     if (SpanningTreeNodes[I].OutEdges)
962       Alloc.deallocate(SpanningTreeNodes[I].OutEdges);
963     if (SpanningTreeNodes[I].InEdges)
964       Alloc.deallocate(SpanningTreeNodes[I].InEdges);
965     if (CFGNodes[I].OutEdges)
966       Alloc.deallocate(CFGNodes[I].OutEdges);
967     if (CFGNodes[I].InEdges)
968       Alloc.deallocate(CFGNodes[I].InEdges);
969   }
970   if (SpanningTreeNodes)
971     Alloc.deallocate(SpanningTreeNodes);
972   if (CFGNodes)
973     Alloc.deallocate(CFGNodes);
974 }
975 
976 void Graph::dump() const {
977   reportNumber("Dumping graph with number of nodes: ", NumNodes, 10);
978   report("  Full graph:\n");
979   for (int I = 0; I < NumNodes; ++I) {
980     const Node *N = &CFGNodes[I];
981     reportNumber("    Node #", I, 10);
982     reportNumber("      InEdges total ", N->NumInEdges, 10);
983     for (int J = 0; J < N->NumInEdges; ++J)
984       reportNumber("        ", N->InEdges[J].Node, 10);
985     reportNumber("      OutEdges total ", N->NumOutEdges, 10);
986     for (int J = 0; J < N->NumOutEdges; ++J)
987       reportNumber("        ", N->OutEdges[J].Node, 10);
988     report("\n");
989   }
990   report("  Spanning tree:\n");
991   for (int I = 0; I < NumNodes; ++I) {
992     const Node *N = &SpanningTreeNodes[I];
993     reportNumber("    Node #", I, 10);
994     reportNumber("      InEdges total ", N->NumInEdges, 10);
995     for (int J = 0; J < N->NumInEdges; ++J)
996       reportNumber("        ", N->InEdges[J].Node, 10);
997     reportNumber("      OutEdges total ", N->NumOutEdges, 10);
998     for (int J = 0; J < N->NumOutEdges; ++J)
999       reportNumber("        ", N->OutEdges[J].Node, 10);
1000     report("\n");
1001   }
1002 }
1003 
1004 void Graph::dumpEdgeFreqs() const {
1005   reportNumber(
1006       "Dumping edge frequencies for graph with num edges: ", D.NumEdges, 10);
1007   for (int I = 0; I < D.NumEdges; ++I) {
1008     reportNumber("* Src: ", D.Edges[I].FromNode, 10);
1009     reportNumber("  Dst: ", D.Edges[I].ToNode, 10);
1010     reportNumber("    Cnt: ", EdgeFreqs[I], 10);
1011   }
1012 }
1013 
1014 /// Auxiliary map structure for fast lookups of which calls map to each node of
1015 /// the function CFG
1016 struct NodeToCallsMap {
1017   struct MapEntry {
1018     uint32_t NumCalls;
1019     uint32_t *Calls;
1020   };
1021   MapEntry *Entries;
1022   BumpPtrAllocator &Alloc;
1023   const uint32_t NumNodes;
1024 
1025   NodeToCallsMap(BumpPtrAllocator &Alloc, const FunctionDescription &D,
1026                  uint32_t NumNodes)
1027       : Alloc(Alloc), NumNodes(NumNodes) {
1028     Entries = new (Alloc, 0) MapEntry[NumNodes];
1029     for (int I = 0; I < D.NumCalls; ++I) {
1030       DEBUG(reportNumber("Registering call in node ", D.Calls[I].FromNode, 10));
1031       ++Entries[D.Calls[I].FromNode].NumCalls;
1032     }
1033     for (int I = 0; I < NumNodes; ++I) {
1034       Entries[I].Calls = Entries[I].NumCalls ? new (Alloc)
1035                                                    uint32_t[Entries[I].NumCalls]
1036                                              : nullptr;
1037       Entries[I].NumCalls = 0;
1038     }
1039     for (int I = 0; I < D.NumCalls; ++I) {
1040       MapEntry &Entry = Entries[D.Calls[I].FromNode];
1041       Entry.Calls[Entry.NumCalls++] = I;
1042     }
1043   }
1044 
1045   /// Set the frequency of all calls in node \p NodeID to Freq. However, if
1046   /// the calls have their own counters and do not depend on the basic block
1047   /// counter, this means they have landing pads and throw exceptions. In this
1048   /// case, set their frequency with their counters and return the maximum
1049   /// value observed in such counters. This will be used as the new frequency
1050   /// at basic block entry. This is used to fix the CFG edge frequencies in the
1051   /// presence of exceptions.
1052   uint64_t visitAllCallsIn(uint32_t NodeID, uint64_t Freq, uint64_t *CallFreqs,
1053                            const FunctionDescription &D,
1054                            const uint64_t *Counters,
1055                            ProfileWriterContext &Ctx) const {
1056     const MapEntry &Entry = Entries[NodeID];
1057     uint64_t MaxValue = 0ull;
1058     for (int I = 0, E = Entry.NumCalls; I != E; ++I) {
1059       const uint32_t CallID = Entry.Calls[I];
1060       DEBUG(reportNumber("  Setting freq for call ID: ", CallID, 10));
1061       const CallDescription &CallDesc = D.Calls[CallID];
1062       if (CallDesc.Counter == 0xffffffff) {
1063         CallFreqs[CallID] = Freq;
1064         DEBUG(reportNumber("  with : ", Freq, 10));
1065       } else {
1066         const uint64_t CounterVal = Counters[CallDesc.Counter];
1067         CallFreqs[CallID] = CounterVal;
1068         MaxValue = CounterVal > MaxValue ? CounterVal : MaxValue;
1069         DEBUG(reportNumber("  with (private counter) : ", CounterVal, 10));
1070       }
1071       DEBUG(reportNumber("  Address: 0x", CallDesc.TargetAddress, 16));
1072       if (CallFreqs[CallID] > 0)
1073         Ctx.CallFlowTable->get(CallDesc.TargetAddress).Calls +=
1074             CallFreqs[CallID];
1075     }
1076     return MaxValue;
1077   }
1078 
1079   ~NodeToCallsMap() {
1080     for (int I = NumNodes - 1; I >= 0; --I)
1081       if (Entries[I].Calls)
1082         Alloc.deallocate(Entries[I].Calls);
1083     Alloc.deallocate(Entries);
1084   }
1085 };
1086 
1087 /// Fill an array with the frequency of each edge in the function represented
1088 /// by G, as well as another array for each call.
1089 void Graph::computeEdgeFrequencies(const uint64_t *Counters,
1090                                    ProfileWriterContext &Ctx) {
1091   if (NumNodes == 0)
1092     return;
1093 
1094   EdgeFreqs = D.NumEdges ? new (Alloc, 0) uint64_t [D.NumEdges] : nullptr;
1095   CallFreqs = D.NumCalls ? new (Alloc, 0) uint64_t [D.NumCalls] : nullptr;
1096 
1097   // Set up a lookup for calls present in each node (BB)
1098   NodeToCallsMap *CallMap = new (Alloc) NodeToCallsMap(Alloc, D, NumNodes);
1099 
1100   // Perform a bottom-up traversal of the spanning tree in G. Edges in the
1101   // spanning tree don't have explicit counters. We must infer their value using
1102   // a linear combination of other counters (sum of counters of the outgoing
1103   // edges minus sum of counters of the incoming edges).
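  // Illustrative example: if a node's outgoing edges have counts 10 and 5 (so
  // its frequency is 15) and its only instrumented incoming edge has count 4,
  // the uninstrumented spanning-tree edge into it is inferred as 15 - 4 = 11
  // (this is the ParentEdgeFreq computation further down).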
1104   uint32_t *Stack = new (Alloc) uint32_t [NumNodes];
1105   uint32_t StackTop = 0;
1106   enum Status : uint8_t { S_NEW = 0, S_VISITING, S_VISITED };
1107   Status *Visited = new (Alloc, 0) Status[NumNodes];
1108   uint64_t *LeafFrequency = new (Alloc, 0) uint64_t[NumNodes];
1109   uint64_t *EntryAddress = new (Alloc, 0) uint64_t[NumNodes];
1110 
1111   // Set up a fast lookup for frequency of leaf nodes, which have special
1112   // basic block frequency instrumentation (they are not edge profiled).
1113   for (int I = 0; I < D.NumLeafNodes; ++I) {
1114     LeafFrequency[D.LeafNodes[I].Node] = Counters[D.LeafNodes[I].Counter];
1115     DEBUG({
1116       if (Counters[D.LeafNodes[I].Counter] > 0) {
1117         reportNumber("Leaf Node# ", D.LeafNodes[I].Node, 10);
1118         reportNumber("     Counter: ", Counters[D.LeafNodes[I].Counter], 10);
1119       }
1120     });
1121   }
1122   for (int I = 0; I < D.NumEntryNodes; ++I) {
1123     EntryAddress[D.EntryNodes[I].Node] = D.EntryNodes[I].Address;
1124     DEBUG({
1125         reportNumber("Entry Node# ", D.EntryNodes[I].Node, 10);
1126         reportNumber("      Address: ", D.EntryNodes[I].Address, 16);
1127     });
1128   }
1129   // Add all root nodes to the stack
1130   for (int I = 0; I < NumNodes; ++I)
1131     if (SpanningTreeNodes[I].NumInEdges == 0)
1132       Stack[StackTop++] = I;
1133 
1134   // Empty stack?
1135   if (StackTop == 0) {
1136     DEBUG(report("Empty stack!\n"));
1137     Alloc.deallocate(EntryAddress);
1138     Alloc.deallocate(LeafFrequency);
1139     Alloc.deallocate(Visited);
1140     Alloc.deallocate(Stack);
1141     CallMap->~NodeToCallsMap();
1142     Alloc.deallocate(CallMap);
1143     if (CallFreqs)
1144       Alloc.deallocate(CallFreqs);
1145     if (EdgeFreqs)
1146       Alloc.deallocate(EdgeFreqs);
1147     EdgeFreqs = nullptr;
1148     CallFreqs = nullptr;
1149     return;
1150   }
1151   // Add all known edge counts, will infer the rest
1152   for (int I = 0; I < D.NumEdges; ++I) {
1153     const uint32_t C = D.Edges[I].Counter;
1154     if (C == 0xffffffff) // inferred counter - we will compute its value
1155       continue;
1156     EdgeFreqs[I] = Counters[C];
1157   }
1158 
1159   while (StackTop > 0) {
1160     const uint32_t Cur = Stack[--StackTop];
1161     DEBUG({
1162       if (Visited[Cur] == S_VISITING)
1163         report("(visiting) ");
1164       else
1165         report("(new) ");
1166       reportNumber("Cur: ", Cur, 10);
1167     });
1168 
1169     // This shouldn't happen in a tree
1170     assert(Visited[Cur] != S_VISITED, "should not have visited nodes in stack");
1171     if (Visited[Cur] == S_NEW) {
1172       Visited[Cur] = S_VISITING;
1173       Stack[StackTop++] = Cur;
1174       assert(StackTop <= NumNodes, "stack grew too large");
1175       for (int I = 0, E = SpanningTreeNodes[Cur].NumOutEdges; I < E; ++I) {
1176         const uint32_t Succ = SpanningTreeNodes[Cur].OutEdges[I].Node;
1177         Stack[StackTop++] = Succ;
1178         assert(StackTop <= NumNodes, "stack grew too large");
1179       }
1180       continue;
1181     }
1182     Visited[Cur] = S_VISITED;
1183 
1184     // Establish our node frequency based on outgoing edges, which should all be
1185     // resolved by now.
1186     int64_t CurNodeFreq = LeafFrequency[Cur];
1187     // Not a leaf?
1188     if (!CurNodeFreq) {
1189       for (int I = 0, E = CFGNodes[Cur].NumOutEdges; I != E; ++I) {
1190         const uint32_t SuccEdge = CFGNodes[Cur].OutEdges[I].ID;
1191         CurNodeFreq += EdgeFreqs[SuccEdge];
1192       }
1193     }
1194     if (CurNodeFreq < 0)
1195       CurNodeFreq = 0;
1196 
1197     const uint64_t CallFreq = CallMap->visitAllCallsIn(
1198         Cur, CurNodeFreq > 0 ? CurNodeFreq : 0, CallFreqs, D, Counters, Ctx);
1199 
1200     // Exception handling affected our output flow? Fix with calls info
1201     DEBUG({
1202       if (CallFreq > CurNodeFreq)
1203         report("Bumping node frequency with call info\n");
1204     });
1205     CurNodeFreq = CallFreq > CurNodeFreq ? CallFreq : CurNodeFreq;
1206 
1207     if (CurNodeFreq > 0) {
1208       if (uint64_t Addr = EntryAddress[Cur]) {
1209         DEBUG(
1210             reportNumber("  Setting flow at entry point address 0x", Addr, 16));
1211         DEBUG(reportNumber("  with: ", CurNodeFreq, 10));
1212         Ctx.CallFlowTable->get(Addr).Val = CurNodeFreq;
1213       }
1214     }
1215 
1216     // No parent? Reached a tree root, limit to call frequency updating.
1217     if (SpanningTreeNodes[Cur].NumInEdges == 0)
1218       continue;
1219 
1220     assert(SpanningTreeNodes[Cur].NumInEdges == 1, "must have 1 parent");
1221     const uint32_t Parent = SpanningTreeNodes[Cur].InEdges[0].Node;
1222     const uint32_t ParentEdge = SpanningTreeNodes[Cur].InEdges[0].ID;
1223 
1224     // Calculate parent edge freq.
1225     int64_t ParentEdgeFreq = CurNodeFreq;
1226     for (int I = 0, E = CFGNodes[Cur].NumInEdges; I != E; ++I) {
1227       const uint32_t PredEdge = CFGNodes[Cur].InEdges[I].ID;
1228       ParentEdgeFreq -= EdgeFreqs[PredEdge];
1229     }
1230 
1231     // Sometimes the conservative CFG that BOLT builds will lead to incorrect
1232     // flow computation. For example, in a BB that transitively calls the exit
1233     // syscall, BOLT will add a fall-through successor even though it should not
1234 // have any successors. So this block's execution count will likely be wrong. We
1235     // tolerate this imperfection since this case should be quite infrequent.
1236     if (ParentEdgeFreq < 0) {
1237       DEBUG(dumpEdgeFreqs());
1238       DEBUG(report("WARNING: incorrect flow"));
1239       ParentEdgeFreq = 0;
1240     }
1241     DEBUG(reportNumber("  Setting freq for ParentEdge: ", ParentEdge, 10));
1242     DEBUG(reportNumber("  with ParentEdgeFreq: ", ParentEdgeFreq, 10));
1243     EdgeFreqs[ParentEdge] = ParentEdgeFreq;
1244   }
1245 
1246   Alloc.deallocate(EntryAddress);
1247   Alloc.deallocate(LeafFrequency);
1248   Alloc.deallocate(Visited);
1249   Alloc.deallocate(Stack);
1250   CallMap->~NodeToCallsMap();
1251   Alloc.deallocate(CallMap);
1252   DEBUG(dumpEdgeFreqs());
1253 }
1254 
1255 /// Write to \p FD all of the edge profiles for function \p FuncDesc. Uses
1256 /// \p Alloc to allocate helper dynamic structures used to compute the profile
1257 /// for edges that we do not explicitly instrument.
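/// Each emitted line has the form "<from loc> <to loc> 0 <count>" (the third
/// field is always written as 0 here); an illustrative line is
/// "1 foo 1a 1 foo 20 0 512".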
1258 const uint8_t *writeFunctionProfile(int FD, ProfileWriterContext &Ctx,
1259                                     const uint8_t *FuncDesc,
1260                                     BumpPtrAllocator &Alloc) {
1261   const FunctionDescription F(FuncDesc);
1262   const uint8_t *next = FuncDesc + F.getSize();
1263 
1264 #if !defined(__APPLE__)
1265   uint64_t *bolt_instr_locations = __bolt_instr_locations;
1266 #else
1267   uint64_t *bolt_instr_locations = _bolt_instr_locations_getter();
1268 #endif
1269 
1270   // Skip funcs we know are cold
1271 #ifndef ENABLE_DEBUG
1272   uint64_t CountersFreq = 0;
1273   for (int I = 0; I < F.NumLeafNodes; ++I)
1274     CountersFreq += bolt_instr_locations[F.LeafNodes[I].Counter];
1275 
1276   if (CountersFreq == 0) {
1277     for (int I = 0; I < F.NumEdges; ++I) {
1278       const uint32_t C = F.Edges[I].Counter;
1279       if (C == 0xffffffff)
1280         continue;
1281       CountersFreq += bolt_instr_locations[C];
1282     }
1283     if (CountersFreq == 0) {
1284       for (int I = 0; I < F.NumCalls; ++I) {
1285         const uint32_t C = F.Calls[I].Counter;
1286         if (C == 0xffffffff)
1287           continue;
1288         CountersFreq += bolt_instr_locations[C];
1289       }
1290       if (CountersFreq == 0)
1291         return next;
1292     }
1293   }
1294 #endif
1295 
1296   Graph *G = new (Alloc) Graph(Alloc, F, bolt_instr_locations, Ctx);
1297   DEBUG(G->dump());
1298 
1299   if (!G->EdgeFreqs && !G->CallFreqs) {
1300     G->~Graph();
1301     Alloc.deallocate(G);
1302     return next;
1303   }
1304 
1305   for (int I = 0; I < F.NumEdges; ++I) {
1306     const uint64_t Freq = G->EdgeFreqs[I];
1307     if (Freq == 0)
1308       continue;
1309     const EdgeDescription *Desc = &F.Edges[I];
1310     char LineBuf[BufSize];
1311     char *Ptr = LineBuf;
1312     Ptr = serializeLoc(Ctx, Ptr, Desc->From, BufSize);
1313     Ptr = serializeLoc(Ctx, Ptr, Desc->To, BufSize - (Ptr - LineBuf));
1314     Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 22);
1315     Ptr = intToStr(Ptr, Freq, 10);
1316     *Ptr++ = '\n';
1317     __write(FD, LineBuf, Ptr - LineBuf);
1318   }
1319 
1320   for (int I = 0; I < F.NumCalls; ++I) {
1321     const uint64_t Freq = G->CallFreqs[I];
1322     if (Freq == 0)
1323       continue;
1324     char LineBuf[BufSize];
1325     char *Ptr = LineBuf;
1326     const CallDescription *Desc = &F.Calls[I];
1327     Ptr = serializeLoc(Ctx, Ptr, Desc->From, BufSize);
1328     Ptr = serializeLoc(Ctx, Ptr, Desc->To, BufSize - (Ptr - LineBuf));
1329     Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
1330     Ptr = intToStr(Ptr, Freq, 10);
1331     *Ptr++ = '\n';
1332     __write(FD, LineBuf, Ptr - LineBuf);
1333   }
1334 
1335   G->~Graph();
1336   Alloc.deallocate(G);
1337   return next;
1338 }
1339 
1340 #if !defined(__APPLE__)
1341 const IndCallTargetDescription *
1342 ProfileWriterContext::lookupIndCallTarget(uint64_t Target) const {
1343   uint32_t B = 0;
1344   uint32_t E = __bolt_instr_num_ind_targets;
1345   if (E == 0)
1346     return nullptr;
1347   do {
1348     uint32_t I = (E - B) / 2 + B;
1349     if (IndCallTargets[I].Address == Target)
1350       return &IndCallTargets[I];
1351     if (IndCallTargets[I].Address < Target)
1352       B = I + 1;
1353     else
1354       E = I;
1355   } while (B < E);
1356   return nullptr;
1357 }
1358 
1359 /// Write a single indirect call <src, target> pair to the fdata file
1360 void visitIndCallCounter(IndirectCallHashTable::MapEntry &Entry,
1361                          int FD, int CallsiteID,
1362                          ProfileWriterContext *Ctx) {
1363   if (Entry.Val == 0)
1364     return;
1365   DEBUG(reportNumber("Target func 0x", Entry.Key, 16));
1366   DEBUG(reportNumber("Target freq: ", Entry.Val, 10));
1367   const IndCallDescription *CallsiteDesc =
1368       &Ctx->IndCallDescriptions[CallsiteID];
1369   const IndCallTargetDescription *TargetDesc =
1370       Ctx->lookupIndCallTarget(Entry.Key);
1371   if (!TargetDesc) {
1372     DEBUG(report("Failed to lookup indirect call target\n"));
1373     char LineBuf[BufSize];
1374     char *Ptr = LineBuf;
1375     Ptr = serializeLoc(*Ctx, Ptr, *CallsiteDesc, BufSize);
1376     Ptr = strCopy(Ptr, "0 [unknown] 0 0 ", BufSize - (Ptr - LineBuf) - 40);
1377     Ptr = intToStr(Ptr, Entry.Val, 10);
1378     *Ptr++ = '\n';
1379     __write(FD, LineBuf, Ptr - LineBuf);
1380     return;
1381   }
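       // Account for these calls in the call flow table so visitCallFlowEntry can
       // later detect callees whose entry frequency exceeds the calls we know about.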
1382   Ctx->CallFlowTable->get(TargetDesc->Address).Calls += Entry.Val;
1383   char LineBuf[BufSize];
1384   char *Ptr = LineBuf;
1385   Ptr = serializeLoc(*Ctx, Ptr, *CallsiteDesc, BufSize);
1386   Ptr = serializeLoc(*Ctx, Ptr, TargetDesc->Loc, BufSize - (Ptr - LineBuf));
1387   Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
1388   Ptr = intToStr(Ptr, Entry.Val, 10);
1389   *Ptr++ = '\n';
1390   __write(FD, LineBuf, Ptr - LineBuf);
1391 }
1392 
1393 /// Write to \p FD all of the indirect call profiles.
1394 void writeIndirectCallProfile(int FD, ProfileWriterContext &Ctx) {
1395   for (int I = 0; I < __bolt_instr_num_ind_calls; ++I) {
1396     DEBUG(reportNumber("IndCallsite #", I, 10));
1397     GlobalIndCallCounters[I].forEachElement(visitIndCallCounter, FD, I, &Ctx);
1398   }
1399 }
1400 
1401 /// Check a single call flow for a callee versus all known callers. If there are
1402 /// fewer callers than the callee expects, write the difference with source
1403 /// [unknown] in the profile.
1404 void visitCallFlowEntry(CallFlowHashTable::MapEntry &Entry, int FD,
1405                         ProfileWriterContext *Ctx) {
1406   DEBUG(reportNumber("Call flow entry address: 0x", Entry.Key, 16));
1407   DEBUG(reportNumber("Calls: ", Entry.Calls, 10));
1408   DEBUG(reportNumber("Reported entry frequency: ", Entry.Val, 10));
1409   DEBUG({
1410     if (Entry.Calls > Entry.Val)
1411       report("  More calls than expected!\n");
1412   });
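       // Entry.Val is the callee's recorded entry frequency and Entry.Calls the sum
       // of calls attributed to it; only emit a line if some calls are unaccounted for.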
1413   if (Entry.Val <= Entry.Calls)
1414     return;
1415   DEBUG(reportNumber(
1416       "  Balancing calls with traffic: ", Entry.Val - Entry.Calls, 10));
1417   const IndCallTargetDescription *TargetDesc =
1418       Ctx->lookupIndCallTarget(Entry.Key);
1419   if (!TargetDesc) {
1420     // There is probably something wrong with this callee and this should be
1421     // investigated, but I don't want to assert and lose all data collected.
1422     DEBUG(report("WARNING: failed to look up call target!\n"));
1423     return;
1424   }
1425   char LineBuf[BufSize];
1426   char *Ptr = LineBuf;
1427   Ptr = strCopy(Ptr, "0 [unknown] 0 ", BufSize);
1428   Ptr = serializeLoc(*Ctx, Ptr, TargetDesc->Loc, BufSize - (Ptr - LineBuf));
1429   Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
1430   Ptr = intToStr(Ptr, Entry.Val - Entry.Calls, 10);
1431   *Ptr++ = '\n';
1432   __write(FD, LineBuf, Ptr - LineBuf);
1433 }
1434 
1435 /// Open the fdata file for writing and return a valid file descriptor, aborting
1436 /// the program upon failure.
1437 int openProfile() {
1438   // Build the profile name string by appending our PID
1439   char Buf[BufSize];
1440   char *Ptr = Buf;
1441   uint64_t PID = __getpid();
1442   Ptr = strCopy(Buf, __bolt_instr_filename, BufSize);
1443   if (__bolt_instr_use_pid) {
1444     Ptr = strCopy(Ptr, ".", BufSize - (Ptr - Buf + 1));
1445     Ptr = intToStr(Ptr, PID, 10);
1446     Ptr = strCopy(Ptr, ".fdata", BufSize - (Ptr - Buf + 1));
1447   }
1448   *Ptr++ = '\0';
1449   uint64_t FD = __open(Buf,
1450                        /*flags=*/0x241 /*O_WRONLY|O_TRUNC|O_CREAT*/,
1451                        /*mode=*/0666);
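       // Raw Linux syscalls report failure by returning a negative errno value.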
1452   if (static_cast<int64_t>(FD) < 0) {
1453     report("Error while trying to open profile file for writing: ");
1454     report(Buf);
1455     reportNumber("\nFailed with error number: 0x",
1456                  0 - static_cast<int64_t>(FD), 16);
1457     __exit(1);
1458   }
1459   return FD;
1460 }
1461 
1462 #endif
1463 
1464 } // anonymous namespace
1465 
1466 #if !defined(__APPLE__)
1467 
1468 /// Reset all counters in case you want to start profiling a new phase of your
1469 /// program independently of prior phases.
1470 /// The address of this function is printed by BOLT and it can be called by
1471 /// any attached debugger at runtime. A useful one-liner for gdb:
1472 ///
1473 ///   gdb -p $(pgrep -xo PROCESSNAME) -ex 'p ((void(*)())0xdeadbeef)()' \
1474 ///     -ex 'set confirm off' -ex quit
1475 ///
1476 /// where 0xdeadbeef is this function's address and PROCESSNAME is your binary
1477 /// file name.
1478 extern "C" void __bolt_instr_clear_counters() {
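       // Counters are 64-bit slots, hence the factor of 8 to get the size in bytes.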
1479   memset(reinterpret_cast<char *>(__bolt_instr_locations), 0,
1480          __bolt_num_counters * 8);
1481   for (int I = 0; I < __bolt_instr_num_ind_calls; ++I)
1482     GlobalIndCallCounters[I].resetCounters();
1483 }
1484 
1485 /// This is the entry point for profile writing.
1486 /// There are three ways of getting here:
1487 ///
1488 ///  * Program execution ended, finalization methods are running and BOLT
1489 ///    hooked into the FINI entry of your binary's dynamic section;
1490 ///  * You used the sleep timer option and during initialization we forked
1491 ///    a separate process that will call this function periodically;
1492 ///  * BOLT prints this function's address so you can attach a debugger and
1493 ///    call this function directly to get your profile written to disk
1494 ///    on demand.
1495 ///
1496 extern "C" void __attribute((force_align_arg_pointer))
1497 __bolt_instr_data_dump() {
1498   // A dump is already in progress (the write mutex is held); bail out
1499   if (!GlobalWriteProfileMutex->acquire())
1500     return;
1501 
1502   BumpPtrAllocator HashAlloc;
1503   HashAlloc.setMaxSize(0x6400000);
1504   ProfileWriterContext Ctx = readDescriptions();
1505   Ctx.CallFlowTable = new (HashAlloc, 0) CallFlowHashTable(HashAlloc);
1506 
1507   DEBUG(printStats(Ctx));
1508 
1509   int FD = openProfile();
1510 
1511   BumpPtrAllocator Alloc;
1512   Alloc.setMaxSize(0x6400000);
1513   const uint8_t *FuncDesc = Ctx.FuncDescriptions;
1514   for (int I = 0, E = __bolt_instr_num_funcs; I < E; ++I) {
1515     FuncDesc = writeFunctionProfile(FD, Ctx, FuncDesc, Alloc);
1516     Alloc.clear();
1517     DEBUG(reportNumber("FuncDesc now: ", (uint64_t)FuncDesc, 16));
1518   }
1519   assert(FuncDesc == (void *)Ctx.Strings,
1520          "FuncDesc ptr must be equal to stringtable");
1521 
1522   writeIndirectCallProfile(FD, Ctx);
1523   Ctx.CallFlowTable->forEachElement(visitCallFlowEntry, FD, &Ctx);
1524 
1525   __fsync(FD);
1526   __close(FD);
1527   __munmap(Ctx.MMapPtr, Ctx.MMapSize);
1528   __close(Ctx.FileDesc);
1529   HashAlloc.destroy();
1530   GlobalWriteProfileMutex->release();
1531   DEBUG(report("Finished writing profile.\n"));
1532 }
1533 
1534 /// Event loop for our child process spawned during setup to dump profile data
1535 /// at user-specified intervals
1536 void watchProcess() {
1537   timespec ts, rem;
1538   uint64_t Elapsed = 0ull;
1539   uint64_t ppid;
1540   if (__bolt_instr_wait_forks) {
1541     // Store the negated parent pgid so __kill() below targets the whole group
1542     ppid = -__getpgid(0);
1543     // And leave parent process group
1544     __setpgid(0, 0);
1545   } else {
1546     // Store parent pid
1547     ppid = __getppid();
1548     if (ppid == 1) {
1549       // Parent already dead
1550       __bolt_instr_data_dump();
1551       goto out;
1552     }
1553   }
1554 
1555   ts.tv_sec = 1;
1556   ts.tv_nsec = 0;
1557   while (1) {
1558     __nanosleep(&ts, &rem);
1559     // This means our parent process or all its forks are dead,
1560     // so no need for us to keep dumping.
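         // Signal 0 performs no action but still checks that the target pid (or,
         // when negative, the whole process group) still exists.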
1561     if (__kill(ppid, 0) < 0) {
1562       if (__bolt_instr_no_counters_clear)
1563         __bolt_instr_data_dump();
1564       break;
1565     }
1566 
1567     if (++Elapsed < __bolt_instr_sleep_time)
1568       continue;
1569 
1570     Elapsed = 0;
1571     __bolt_instr_data_dump();
1572     if (!__bolt_instr_no_counters_clear)
1573       __bolt_instr_clear_counters();
1574   }
1575 
1576 out:;
1577   DEBUG(report("My parent process is dead, bye!\n"));
1578   __exit(0);
1579 }
1580 
1581 extern "C" void __bolt_instr_indirect_call();
1582 extern "C" void __bolt_instr_indirect_tailcall();
1583 
1584 /// Initialization code
1585 extern "C" void __attribute((force_align_arg_pointer)) __bolt_instr_setup() {
1586   const uint64_t CountersStart =
1587       reinterpret_cast<uint64_t>(&__bolt_instr_locations[0]);
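       // Round the end of the counter area up to a page boundary; mmap below
       // operates on whole pages.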
1588   const uint64_t CountersEnd = alignTo(
1589       reinterpret_cast<uint64_t>(&__bolt_instr_locations[__bolt_num_counters]),
1590       0x1000);
1591   DEBUG(reportNumber("replace mmap start: ", CountersStart, 16));
1592   DEBUG(reportNumber("replace mmap stop: ", CountersEnd, 16));
1593   assert(CountersEnd > CountersStart, "no counters");
1594   // Map our counters as shared instead of private, so forked processes keep
1595   // counting into the same memory
1596   void *Ret =
1597       __mmap(CountersStart, CountersEnd - CountersStart, PROT_READ | PROT_WRITE,
1598              MAP_ANONYMOUS | MAP_SHARED | MAP_FIXED, -1, 0);
1599   __bolt_ind_call_counter_func_pointer = __bolt_instr_indirect_call;
1600   __bolt_ind_tailcall_counter_func_pointer = __bolt_instr_indirect_tailcall;
1601   // Conservatively reserve 100MiB of shared pages
1602   GlobalAlloc.setMaxSize(0x6400000);
1603   GlobalAlloc.setShared(true);
1604   GlobalWriteProfileMutex = new (GlobalAlloc, 0) Mutex();
1605   if (__bolt_instr_num_ind_calls > 0)
1606     GlobalIndCallCounters =
1607         new (GlobalAlloc, 0) IndirectCallHashTable[__bolt_instr_num_ind_calls];
1608 
1609   if (__bolt_instr_sleep_time != 0) {
1610     // Move the instrumented process into its own process group
1611     if (__bolt_instr_wait_forks)
1612       __setpgid(0, 0);
1613 
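         // The parent (non-zero PID) returns and keeps running the instrumented
         // program, while the child becomes the watcher that periodically dumps the
         // profile.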
1614     if (long PID = __fork())
1615       return;
1616     watchProcess();
1617   }
1618 }
1619 
1620 extern "C" __attribute((force_align_arg_pointer)) void
1621 instrumentIndirectCall(uint64_t Target, uint64_t IndCallID) {
1622   GlobalIndCallCounters[IndCallID].incrementVal(Target, GlobalAlloc);
1623 }
1624 
1625 /// We receive the identifier of the indirect call site as well as the target
1626 /// address of the call as arguments passed on the stack
1627 extern "C" __attribute((naked)) void __bolt_instr_indirect_call()
1628 {
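       // SAVE_ALL spills the register state first; the target address and the call
       // site ID that the instrumented code pushed are then loaded from fixed stack
       // offsets into the argument registers of instrumentIndirectCall.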
1629   __asm__ __volatile__(SAVE_ALL
1630                        "mov 0xa0(%%rsp), %%rdi\n"
1631                        "mov 0x98(%%rsp), %%rsi\n"
1632                        "call instrumentIndirectCall\n"
1633                        RESTORE_ALL
1634                        "ret\n"
1635                        :::);
1636 }
1637 
1638 extern "C" __attribute((naked)) void __bolt_instr_indirect_tailcall()
1639 {
1640   __asm__ __volatile__(SAVE_ALL
1641                        "mov 0x98(%%rsp), %%rdi\n"
1642                        "mov 0x90(%%rsp), %%rsi\n"
1643                        "call instrumentIndirectCall\n"
1644                        RESTORE_ALL
1645                        "ret\n"
1646                        :::);
1647 }
1648 
1649 /// This hooks the ELF entry point; it needs to save all machine state.
1650 extern "C" __attribute((naked)) void __bolt_instr_start()
1651 {
1652   __asm__ __volatile__(SAVE_ALL
1653                        "call __bolt_instr_setup\n"
1654                        RESTORE_ALL
1655                        "jmp __bolt_start_trampoline\n"
1656                        :::);
1657 }
1658 
1659 /// This hooks into the ELF DT_FINI entry
1660 extern "C" void __bolt_instr_fini() {
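       // Run the binary's original finalization code through the trampoline first,
       // then dump the profile unless the watcher process handles periodic dumps.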
1661   __bolt_fini_trampoline();
1662   if (__bolt_instr_sleep_time == 0)
1663     __bolt_instr_data_dump();
1664   DEBUG(report("Finished.\n"));
1665 }
1666 
1667 #endif
1668 
1669 #if defined(__APPLE__)
1670 
1671 extern "C" void __bolt_instr_data_dump() {
1672   ProfileWriterContext Ctx = readDescriptions();
1673 
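       // The profile is written to stderr (file descriptor 2) on Darwin.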
1674   int FD = 2;
1675   BumpPtrAllocator Alloc;
1676   const uint8_t *FuncDesc = Ctx.FuncDescriptions;
1677   uint32_t bolt_instr_num_funcs = _bolt_instr_num_funcs_getter();
1678 
1679   for (int I = 0, E = bolt_instr_num_funcs; I < E; ++I) {
1680     FuncDesc = writeFunctionProfile(FD, Ctx, FuncDesc, Alloc);
1681     Alloc.clear();
1682     DEBUG(reportNumber("FuncDesc now: ", (uint64_t)FuncDesc, 16));
1683   }
1684   assert(FuncDesc == (void *)Ctx.Strings,
1685          "FuncDesc ptr must be equal to stringtable");
1686 }
1687 
1688 // On OSX/iOS the final symbol name of an extern "C" function/variable contains
1689 // one extra leading underscore: _bolt_instr_setup -> __bolt_instr_setup.
1690 extern "C"
1691 __attribute__((section("__TEXT,__setup")))
1692 __attribute__((force_align_arg_pointer))
1693 void _bolt_instr_setup() {
1694   __asm__ __volatile__(SAVE_ALL :::);
1695 
1696   report("Hello!\n");
1697 
1698   __asm__ __volatile__(RESTORE_ALL :::);
1699 }
1700 
1701 extern "C"
1702 __attribute__((section("__TEXT,__fini")))
1703 __attribute__((force_align_arg_pointer))
1704 void _bolt_instr_fini() {
1705   report("Bye!\n");
1706   __bolt_instr_data_dump();
1707 }
1708 
1709 #endif
1710 #endif
1711