xref: /llvm-project/bolt/runtime/instr.cpp (revision a799298152e3ef08b4919cdaac7a614f7cca9bc6)
12f09f445SMaksim Panchenko //===- bolt/runtime/instr.cpp ---------------------------------------------===//
262aa74f8SRafael Auler //
3da752c9cSRafael Auler // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4da752c9cSRafael Auler // See https://llvm.org/LICENSE.txt for license information.
5da752c9cSRafael Auler // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
662aa74f8SRafael Auler //
762aa74f8SRafael Auler //===----------------------------------------------------------------------===//
862aa74f8SRafael Auler //
916a497c6SRafael Auler // BOLT runtime instrumentation library for x86 Linux. Currently, BOLT does
1016a497c6SRafael Auler // not support linking modules with dependencies on one another into the final
1116a497c6SRafael Auler // binary (TODO?), which means this library has to be self-contained in a single
1216a497c6SRafael Auler // module.
1316a497c6SRafael Auler //
1416a497c6SRafael Auler // All extern declarations here need to be defined by BOLT itself. Those will be
1516a497c6SRafael Auler // undefined symbols that BOLT needs to resolve by emitting these symbols with
1616a497c6SRafael Auler // MCStreamer. Currently, Passes/Instrumentation.cpp is the pass responsible
1716a497c6SRafael Auler // for defining the symbols here, and these two files are tightly coupled: one
1816a497c6SRafael Auler // works statically when you run BOLT and the other at program runtime when
1916a497c6SRafael Auler // you run an instrumented binary. The main goal here is to output an fdata file
2016a497c6SRafael Auler // (BOLT profile) with the instrumentation counters inserted by the static pass.
2116a497c6SRafael Auler // Counters for indirect calls are an exception, as we can't know them
2216a497c6SRafael Auler // statically. These counters are created and managed here. To allow this, we
2316a497c6SRafael Auler // need a minimal framework for allocating memory dynamically. We provide this
2416a497c6SRafael Auler // with the BumpPtrAllocator class (not LLVM's, but our own version of it).
2516a497c6SRafael Auler //
2616a497c6SRafael Auler // Since this code is intended to be inserted into any executable, we decided to
2716a497c6SRafael Auler // make it standalone and not depend on any external libraries (i.e. language
2816a497c6SRafael Auler // support libraries, such as glibc or libstdc++). To allow this, we provide a few
2916a497c6SRafael Auler // lightweight implementations of common OS-interaction functionality using direct
3016a497c6SRafael Auler // syscall wrappers. Our simple allocator doesn't manage deallocations that
3116a497c6SRafael Auler // fragment the memory space, so it is stack-based. This is the minimal framework
3216a497c6SRafael Auler // provided here to allow processing instrumented counters and writing fdata.
3316a497c6SRafael Auler //
3416a497c6SRafael Auler // In the C++ idiom used here, we never use or rely on constructors or
3516a497c6SRafael Auler // destructors for global objects. That's because those need support from the
3616a497c6SRafael Auler // linker in initialization/finalization code, and we want to keep our linker
3716a497c6SRafael Auler // very simple. Similarly, we don't create any global objects that are zero
3816a497c6SRafael Auler // initialized, since those would need to go in .bss, which our simple linker
3916a497c6SRafael Auler // also doesn't support (TODO?).
4062aa74f8SRafael Auler //
4162aa74f8SRafael Auler //===----------------------------------------------------------------------===//
4262aa74f8SRafael Auler 
43cb8d701bSVladislav Khmelevsky #if defined (__x86_64__)
449bd71615SXun Li #include "common.h"
4562aa74f8SRafael Auler 
4616a497c6SRafael Auler // Enables very verbose logging to stderr, useful when debugging
47cc4b2fb6SRafael Auler //#define ENABLE_DEBUG
48cc4b2fb6SRafael Auler 
49cc4b2fb6SRafael Auler #ifdef ENABLE_DEBUG
50cc4b2fb6SRafael Auler #define DEBUG(X)                                                               \
51cc4b2fb6SRafael Auler   { X; }
52cc4b2fb6SRafael Auler #else
53cc4b2fb6SRafael Auler #define DEBUG(X)                                                               \
54cc4b2fb6SRafael Auler   {}
55cc4b2fb6SRafael Auler #endif
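// Illustrative use of the macro above (a sketch, not part of the runtime
// logic): wrap any debug-only reporting so it compiles away when ENABLE_DEBUG
// is not defined, e.g.
//   DEBUG(reportNumber("Number of counters: ", __bolt_num_counters, 10));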
56cc4b2fb6SRafael Auler 
57af58da4eSVladislav Khmelevsky #pragma GCC visibility push(hidden)
583b876cc3SAlexander Shaposhnikov 
593b876cc3SAlexander Shaposhnikov extern "C" {
60553f28e9SVladislav Khmelevsky 
61553f28e9SVladislav Khmelevsky #if defined(__APPLE__)
623b876cc3SAlexander Shaposhnikov extern uint64_t* _bolt_instr_locations_getter();
633b876cc3SAlexander Shaposhnikov extern uint32_t _bolt_num_counters_getter();
643b876cc3SAlexander Shaposhnikov 
65a0dd5b05SAlexander Shaposhnikov extern uint8_t* _bolt_instr_tables_getter();
66a0dd5b05SAlexander Shaposhnikov extern uint32_t _bolt_instr_num_funcs_getter();
673b876cc3SAlexander Shaposhnikov 
683b876cc3SAlexander Shaposhnikov #else
69bbd9d610SAlexander Shaposhnikov 
7016a497c6SRafael Auler // Main counters inserted by instrumentation, incremented during runtime when
7116a497c6SRafael Auler // points of interest (locations) in the program are reached. Those are direct
7216a497c6SRafael Auler // calls and direct and indirect branches (local ones). There are also counters
7316a497c6SRafael Auler // for basic block execution when the block is a spanning tree leaf and needs to
7416a497c6SRafael Auler // be counted in order to infer the execution count of other edges of the CFG.
7562aa74f8SRafael Auler extern uint64_t __bolt_instr_locations[];
7616a497c6SRafael Auler extern uint32_t __bolt_num_counters;
7716a497c6SRafael Auler // Descriptions are serialized metadata about binary functions written by BOLT,
7816a497c6SRafael Auler // so we have a minimal understanding about the program structure. For a
7916a497c6SRafael Auler // reference on the exact format of this metadata, see *Description structs,
8016a497c6SRafael Auler // Location, InstrumentedNode and EntryNode.
8116a497c6SRafael Auler // Number of indirect call site descriptions
8216a497c6SRafael Auler extern uint32_t __bolt_instr_num_ind_calls;
8316a497c6SRafael Auler // Number of indirect call target descriptions
8416a497c6SRafael Auler extern uint32_t __bolt_instr_num_ind_targets;
85cc4b2fb6SRafael Auler // Number of function descriptions
86cc4b2fb6SRafael Auler extern uint32_t __bolt_instr_num_funcs;
8716a497c6SRafael Auler // Time to sleep across dumps (when we write the fdata profile to disk)
8816a497c6SRafael Auler extern uint32_t __bolt_instr_sleep_time;
8976d346caSVladislav Khmelevsky // Do not clear counters across dumps; rewrite the file with the updated values
9076d346caSVladislav Khmelevsky extern bool __bolt_instr_no_counters_clear;
9176d346caSVladislav Khmelevsky // Wait until all forks of the instrumented process have finished
9276d346caSVladislav Khmelevsky extern bool __bolt_instr_wait_forks;
93cc4b2fb6SRafael Auler // Filename to dump data to
9462aa74f8SRafael Auler extern char __bolt_instr_filename[];
95519cbbaaSVasily Leonenko // Instrumented binary file path
96519cbbaaSVasily Leonenko extern char __bolt_instr_binpath[];
9716a497c6SRafael Auler // If true, append current PID to the fdata filename when creating it so
9816a497c6SRafael Auler // different invocations of the same program can be differentiated.
9916a497c6SRafael Auler extern bool __bolt_instr_use_pid;
10016a497c6SRafael Auler // Function pointers used to instrument indirect calls. The BOLT static pass
10116a497c6SRafael Auler // will identify indirect calls and modify them to load the address stored in
10216a497c6SRafael Auler // these trampolines and call it instead. BOLT can't use direct calls to
10316a497c6SRafael Auler // our handlers because our addresses here are not known at analysis time. We
10416a497c6SRafael Auler // only support resolving dependencies from this file to the output of BOLT,
10516a497c6SRafael Auler // *not* the other way around.
10616a497c6SRafael Auler // TODO: We need better linking support to make that happen.
107361f3b55SVladislav Khmelevsky extern void (*__bolt_ind_call_counter_func_pointer)();
108361f3b55SVladislav Khmelevsky extern void (*__bolt_ind_tailcall_counter_func_pointer)();
109ad79d517SVasily Leonenko // Function pointers to init/fini trampoline routines in the binary, so we can
110ad79d517SVasily Leonenko // resume regular execution of these functions that we hooked
111553f28e9SVladislav Khmelevsky extern void __bolt_start_trampoline();
112553f28e9SVladislav Khmelevsky extern void __bolt_fini_trampoline();
11362aa74f8SRafael Auler 
114a0dd5b05SAlexander Shaposhnikov #endif
115553f28e9SVladislav Khmelevsky }
116a0dd5b05SAlexander Shaposhnikov 
117cc4b2fb6SRafael Auler namespace {
118cc4b2fb6SRafael Auler 
119cc4b2fb6SRafael Auler /// A simple allocator that mmaps a fixed size region and manages this space
120cc4b2fb6SRafael Auler /// in a stack fashion, meaning you always deallocate the last element that
12116a497c6SRafael Auler /// was allocated. In practice, we don't need to deallocate individual elements.
12216a497c6SRafael Auler /// We monotonically increase our usage and then deallocate everything once we
12316a497c6SRafael Auler /// are done processing something.
124cc4b2fb6SRafael Auler class BumpPtrAllocator {
12516a497c6SRafael Auler   /// This is written before each allocation and acts as a canary to detect when
12616a497c6SRafael Auler   /// a bug causes our program to cross allocation boundaries.
127cc4b2fb6SRafael Auler   struct EntryMetadata {
128cc4b2fb6SRafael Auler     uint64_t Magic;
129cc4b2fb6SRafael Auler     uint64_t AllocSize;
130cc4b2fb6SRafael Auler   };
1319bd71615SXun Li 
132cc4b2fb6SRafael Auler public:
133faaefff6SAlexander Shaposhnikov   void *allocate(size_t Size) {
13416a497c6SRafael Auler     Lock L(M);
135a0dd5b05SAlexander Shaposhnikov 
136cc4b2fb6SRafael Auler     if (StackBase == nullptr) {
13716a497c6SRafael Auler       StackBase = reinterpret_cast<uint8_t *>(
1388b23a853SDenis Revunov           __mmap(0, MaxSize, PROT_READ | PROT_WRITE,
1398b23a853SDenis Revunov                  (Shared ? MAP_SHARED : MAP_PRIVATE) | MAP_ANONYMOUS, -1, 0));
1408ed172cfSDenis Revunov       assert(StackBase != MAP_FAILED,
1418ed172cfSDenis Revunov              "BumpPtrAllocator: failed to mmap stack!");
142cc4b2fb6SRafael Auler       StackSize = 0;
143cc4b2fb6SRafael Auler     }
144a0dd5b05SAlexander Shaposhnikov 
145cc4b2fb6SRafael Auler     Size = alignTo(Size + sizeof(EntryMetadata), 16);
146cc4b2fb6SRafael Auler     uint8_t *AllocAddress = StackBase + StackSize + sizeof(EntryMetadata);
147cc4b2fb6SRafael Auler     auto *M = reinterpret_cast<EntryMetadata *>(StackBase + StackSize);
14816a497c6SRafael Auler     M->Magic = Magic;
149cc4b2fb6SRafael Auler     M->AllocSize = Size;
150cc4b2fb6SRafael Auler     StackSize += Size;
15116a497c6SRafael Auler     assert(StackSize < MaxSize, "allocator ran out of memory");
152cc4b2fb6SRafael Auler     return AllocAddress;
153cc4b2fb6SRafael Auler   }
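  // Per-allocation layout produced by allocate() above (sketch):
  //
  //   StackBase + StackSize  ->  [ EntryMetadata { Magic, AllocSize } ]
  //   returned AllocAddress  ->  [ payload ... ]
  //
  // AllocSize is the metadata plus payload rounded up to a 16-byte multiple;
  // the debug deallocate() below uses it to validate and pop the last
  // allocation.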
154cc4b2fb6SRafael Auler 
15516a497c6SRafael Auler #ifdef DEBUG
15616a497c6SRafael Auler   /// Element-wise deallocation is only used for debugging to catch memory
15716a497c6SRafael Auler   /// bugs by checking magic bytes. Ordinarily, we reset the allocator once
15816a497c6SRafael Auler   /// we are done with it. Reset is done with clear(). There's no need
15916a497c6SRafael Auler   /// to deallocate each element individually.
160cc4b2fb6SRafael Auler   void deallocate(void *Ptr) {
16116a497c6SRafael Auler     Lock L(M);
162cc4b2fb6SRafael Auler     uint8_t MetadataOffset = sizeof(EntryMetadata);
163cc4b2fb6SRafael Auler     auto *M = reinterpret_cast<EntryMetadata *>(
164cc4b2fb6SRafael Auler         reinterpret_cast<uint8_t *>(Ptr) - MetadataOffset);
165cc4b2fb6SRafael Auler     const uint8_t *StackTop = StackBase + StackSize + MetadataOffset;
166cc4b2fb6SRafael Auler     // Validate size
167cc4b2fb6SRafael Auler     if (Ptr != StackTop - M->AllocSize) {
16816a497c6SRafael Auler       // Failed validation, check if it is a pointer returned by operator new []
169cc4b2fb6SRafael Auler       MetadataOffset +=
170cc4b2fb6SRafael Auler           sizeof(uint64_t); // Space for number of elements alloc'ed
171cc4b2fb6SRafael Auler       M = reinterpret_cast<EntryMetadata *>(reinterpret_cast<uint8_t *>(Ptr) -
172cc4b2fb6SRafael Auler                                             MetadataOffset);
17316a497c6SRafael Auler       // If this assertion fails, the pointer failed both checks. Stop the
17416a497c6SRafael Auler       // program, we have a memory bug.
175cc4b2fb6SRafael Auler       assert(Ptr == StackTop - M->AllocSize,
176cc4b2fb6SRafael Auler              "must deallocate the last element alloc'ed");
177cc4b2fb6SRafael Auler     }
17816a497c6SRafael Auler     assert(M->Magic == Magic, "allocator magic is corrupt");
179cc4b2fb6SRafael Auler     StackSize -= M->AllocSize;
180cc4b2fb6SRafael Auler   }
18116a497c6SRafael Auler #else
18216a497c6SRafael Auler   void deallocate(void *) {}
18316a497c6SRafael Auler #endif
18416a497c6SRafael Auler 
18516a497c6SRafael Auler   void clear() {
18616a497c6SRafael Auler     Lock L(M);
18716a497c6SRafael Auler     StackSize = 0;
18816a497c6SRafael Auler   }
18916a497c6SRafael Auler 
19016a497c6SRafael Auler   /// Set mmap reservation size (only relevant before first allocation)
1919bd71615SXun Li   void setMaxSize(uint64_t Size) { MaxSize = Size; }
19216a497c6SRafael Auler 
19316a497c6SRafael Auler   /// Set mmap reservation privacy (only relevant before first allocation)
1949bd71615SXun Li   void setShared(bool S) { Shared = S; }
19516a497c6SRafael Auler 
19616a497c6SRafael Auler   void destroy() {
19716a497c6SRafael Auler     if (StackBase == nullptr)
19816a497c6SRafael Auler       return;
19916a497c6SRafael Auler     __munmap(StackBase, MaxSize);
20016a497c6SRafael Auler   }
201cc4b2fb6SRafael Auler 
2020cc19b56SDenis Revunov   // Placement operator to construct the allocator in possibly shared mmapped memory
2030cc19b56SDenis Revunov   static void *operator new(size_t, void *Ptr) { return Ptr; };
2040cc19b56SDenis Revunov 
205cc4b2fb6SRafael Auler private:
20616a497c6SRafael Auler   static constexpr uint64_t Magic = 0x1122334455667788ull;
20716a497c6SRafael Auler   uint64_t MaxSize = 0xa00000;
208cc4b2fb6SRafael Auler   uint8_t *StackBase{nullptr};
209cc4b2fb6SRafael Auler   uint64_t StackSize{0};
21016a497c6SRafael Auler   bool Shared{false};
21116a497c6SRafael Auler   Mutex M;
212cc4b2fb6SRafael Auler };
213cc4b2fb6SRafael Auler 
21416a497c6SRafael Auler /// Used for allocating indirect call instrumentation counters. Initialized by
21516a497c6SRafael Auler /// __bolt_instr_setup, our initialization routine.
2160cc19b56SDenis Revunov BumpPtrAllocator *GlobalAlloc;
2170cc19b56SDenis Revunov 
2180cc19b56SDenis Revunov // Storage for GlobalAlloc which can be shared if not using
2190cc19b56SDenis Revunov // instrumentation-file-append-pid.
2200cc19b56SDenis Revunov void *GlobalMetadataStorage;
2210cc19b56SDenis Revunov 
222cc4b2fb6SRafael Auler } // anonymous namespace
223cc4b2fb6SRafael Auler 
224cc4b2fb6SRafael Auler // User-defined placement new operators. We only use those (as opposed to
225cc4b2fb6SRafael Auler // overriding the regular operator new) so we can keep our allocator on the
226cc4b2fb6SRafael Auler // stack instead of in a data section (global).
227faaefff6SAlexander Shaposhnikov void *operator new(size_t Sz, BumpPtrAllocator &A) { return A.allocate(Sz); }
228faaefff6SAlexander Shaposhnikov void *operator new(size_t Sz, BumpPtrAllocator &A, char C) {
229cc4b2fb6SRafael Auler   auto *Ptr = reinterpret_cast<char *>(A.allocate(Sz));
230ea2182feSMaksim Panchenko   memset(Ptr, C, Sz);
231cc4b2fb6SRafael Auler   return Ptr;
232cc4b2fb6SRafael Auler }
233faaefff6SAlexander Shaposhnikov void *operator new[](size_t Sz, BumpPtrAllocator &A) {
234cc4b2fb6SRafael Auler   return A.allocate(Sz);
235cc4b2fb6SRafael Auler }
236faaefff6SAlexander Shaposhnikov void *operator new[](size_t Sz, BumpPtrAllocator &A, char C) {
237cc4b2fb6SRafael Auler   auto *Ptr = reinterpret_cast<char *>(A.allocate(Sz));
238ea2182feSMaksim Panchenko   memset(Ptr, C, Sz);
239cc4b2fb6SRafael Auler   return Ptr;
240cc4b2fb6SRafael Auler }
241cc4b2fb6SRafael Auler // Only called during exception unwinding (useless in practice). We must
242cc4b2fb6SRafael Auler // manually deallocate. C++ language weirdness.
2439bd71615SXun Li void operator delete(void *Ptr, BumpPtrAllocator &A) { A.deallocate(Ptr); }
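// Illustrative usage of the pieces above (a sketch with hypothetical sizes,
// not part of the runtime logic): construct objects inside a BumpPtrAllocator
// via the placement operators and release everything at once when done.
//
//   BumpPtrAllocator Alloc;
//   Alloc.setMaxSize(0x100000);                    // only before first use
//   auto *Counters = new (Alloc, 0) uint64_t[128]; // zero-filled array
//   // ... use Counters ...
//   Alloc.clear();   // drop all allocations in one shot
//   Alloc.destroy(); // unmap the backing region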
244cc4b2fb6SRafael Auler 
245cc4b2fb6SRafael Auler namespace {
246cc4b2fb6SRafael Auler 
2479aa134dcSVasily Leonenko // Disable instrumentation optimizations that sacrifice profile accuracy
2489aa134dcSVasily Leonenko extern "C" bool __bolt_instr_conservative;
2499aa134dcSVasily Leonenko 
25016a497c6SRafael Auler /// Basic key-val atom stored in our hash
25116a497c6SRafael Auler struct SimpleHashTableEntryBase {
25216a497c6SRafael Auler   uint64_t Key;
25316a497c6SRafael Auler   uint64_t Val;
25447934c11SDenis Revunov   void dump(const char *Msg = nullptr) {
25547934c11SDenis Revunov     // TODO: make some sort of formatting function
25647934c11SDenis Revunov     // Currently we have to do it the ugly way because
25747934c11SDenis Revunov     // we want every message to be printed atomically via a single call to
25847934c11SDenis Revunov     // __write. If we use reportNumber() and others multiple times, we'll get
25947934c11SDenis Revunov     // garbage in a multithreaded environment
26047934c11SDenis Revunov     char Buf[BufSize];
26147934c11SDenis Revunov     char *Ptr = Buf;
26247934c11SDenis Revunov     Ptr = intToStr(Ptr, __getpid(), 10);
26347934c11SDenis Revunov     *Ptr++ = ':';
26447934c11SDenis Revunov     *Ptr++ = ' ';
26547934c11SDenis Revunov     if (Msg)
26647934c11SDenis Revunov       Ptr = strCopy(Ptr, Msg, strLen(Msg));
26747934c11SDenis Revunov     *Ptr++ = '0';
26847934c11SDenis Revunov     *Ptr++ = 'x';
26947934c11SDenis Revunov     Ptr = intToStr(Ptr, (uint64_t)this, 16);
27047934c11SDenis Revunov     *Ptr++ = ':';
27147934c11SDenis Revunov     *Ptr++ = ' ';
27247934c11SDenis Revunov     Ptr = strCopy(Ptr, "MapEntry(0x", sizeof("MapEntry(0x") - 1);
27347934c11SDenis Revunov     Ptr = intToStr(Ptr, Key, 16);
27447934c11SDenis Revunov     *Ptr++ = ',';
27547934c11SDenis Revunov     *Ptr++ = ' ';
27647934c11SDenis Revunov     *Ptr++ = '0';
27747934c11SDenis Revunov     *Ptr++ = 'x';
27847934c11SDenis Revunov     Ptr = intToStr(Ptr, Val, 16);
27947934c11SDenis Revunov     *Ptr++ = ')';
28047934c11SDenis Revunov     *Ptr++ = '\n';
28147934c11SDenis Revunov     assert(Ptr - Buf < BufSize, "Buffer overflow!");
28247934c11SDenis Revunov     // print everything all at once for atomicity
28347934c11SDenis Revunov     __write(2, Buf, Ptr - Buf);
28447934c11SDenis Revunov   }
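  // Example output of dump("Hit: ") with hypothetical pid and addresses:
  //   1234: Hit: 0x7ffc1000: MapEntry(0x401000, 0x2)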
28516a497c6SRafael Auler };
28616a497c6SRafael Auler 
28716a497c6SRafael Auler /// This hash table implementation starts by allocating a table of size
28816a497c6SRafael Auler /// InitialSize. When conflicts happen in this main table, it resolves
28916a497c6SRafael Auler /// them by chaining a new table of size IncSize. It never reallocs as our
29016a497c6SRafael Auler /// allocator doesn't support it. The keys are intended to be function pointers.
29116a497c6SRafael Auler /// There's no clever hash function (it's just x mod size, size being prime).
29216a497c6SRafael Auler /// I never tuned the coefficients in the modular equation (TODO).
29316a497c6SRafael Auler /// This is used for indirect calls (each call site has one of these, so it
29416a497c6SRafael Auler /// should have a small footprint) and for tallying call counts globally for
29516a497c6SRafael Auler /// each target to check if we missed the origin of some calls (this one is a
29616a497c6SRafael Auler /// large instantiation of this template, since it is global for all call sites).
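/// A worked example of the chaining scheme (hypothetical key): with the
/// default InitialSize == IncSize == 7, Key == 23 hashes to slot 23 % 7 == 2
/// of the root table; if that slot already holds a different key, the lookup
/// continues in that slot's follow-up table at slot (23 / 7) % 7 == 3, and so
/// on, dividing the selector by the table size at each level.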
29716a497c6SRafael Auler template <typename T = SimpleHashTableEntryBase, uint32_t InitialSize = 7,
29816a497c6SRafael Auler           uint32_t IncSize = 7>
29916a497c6SRafael Auler class SimpleHashTable {
30016a497c6SRafael Auler public:
30116a497c6SRafael Auler   using MapEntry = T;
30216a497c6SRafael Auler 
30316a497c6SRafael Auler   /// Increment by 1 the value of \p Key. If it is not in this table, it will be
30416a497c6SRafael Auler   /// added to the table and its value set to 1.
30516a497c6SRafael Auler   void incrementVal(uint64_t Key, BumpPtrAllocator &Alloc) {
3060cc19b56SDenis Revunov     if (!__bolt_instr_conservative) {
3070cc19b56SDenis Revunov       TryLock L(M);
3080cc19b56SDenis Revunov       if (!L.isLocked())
3090cc19b56SDenis Revunov         return;
3100cc19b56SDenis Revunov       auto &E = getOrAllocEntry(Key, Alloc);
3110cc19b56SDenis Revunov       ++E.Val;
3120cc19b56SDenis Revunov       return;
3130cc19b56SDenis Revunov     }
3140cc19b56SDenis Revunov     Lock L(M);
3150cc19b56SDenis Revunov     auto &E = getOrAllocEntry(Key, Alloc);
3160cc19b56SDenis Revunov     ++E.Val;
31716a497c6SRafael Auler   }
31816a497c6SRafael Auler 
31916a497c6SRafael Auler   /// Basic member accessing interface. Here we pass the allocator explicitly to
32016a497c6SRafael Auler   /// avoid storing a pointer to it as part of this table (remember there is one
32116a497c6SRafael Auler   /// hash for each indirect call site, so we want to minimize our footprint).
32216a497c6SRafael Auler   MapEntry &get(uint64_t Key, BumpPtrAllocator &Alloc) {
3239aa134dcSVasily Leonenko     if (!__bolt_instr_conservative) {
3249aa134dcSVasily Leonenko       TryLock L(M);
3259aa134dcSVasily Leonenko       if (!L.isLocked())
3269aa134dcSVasily Leonenko         return NoEntry;
3279aa134dcSVasily Leonenko       return getOrAllocEntry(Key, Alloc);
3289aa134dcSVasily Leonenko     }
32916a497c6SRafael Auler     Lock L(M);
3309aa134dcSVasily Leonenko     return getOrAllocEntry(Key, Alloc);
33116a497c6SRafael Auler   }
33216a497c6SRafael Auler 
33316a497c6SRafael Auler   /// Traverses all elements in the table
33416a497c6SRafael Auler   template <typename... Args>
33516a497c6SRafael Auler   void forEachElement(void (*Callback)(MapEntry &, Args...), Args... args) {
336bd301a41SMichał Chojnowski     Lock L(M);
33716a497c6SRafael Auler     if (!TableRoot)
33816a497c6SRafael Auler       return;
33916a497c6SRafael Auler     return forEachElement(Callback, InitialSize, TableRoot, args...);
34016a497c6SRafael Auler   }
34116a497c6SRafael Auler 
34216a497c6SRafael Auler   void resetCounters();
34316a497c6SRafael Auler 
34416a497c6SRafael Auler private:
34516a497c6SRafael Auler   constexpr static uint64_t VacantMarker = 0;
34616a497c6SRafael Auler   constexpr static uint64_t FollowUpTableMarker = 0x8000000000000000ull;
34716a497c6SRafael Auler 
34816a497c6SRafael Auler   MapEntry *TableRoot{nullptr};
3499aa134dcSVasily Leonenko   MapEntry NoEntry;
35016a497c6SRafael Auler   Mutex M;
35116a497c6SRafael Auler 
35216a497c6SRafael Auler   template <typename... Args>
35316a497c6SRafael Auler   void forEachElement(void (*Callback)(MapEntry &, Args...),
35416a497c6SRafael Auler                       uint32_t NumEntries, MapEntry *Entries, Args... args) {
355c7306cc2SAmir Ayupov     for (uint32_t I = 0; I < NumEntries; ++I) {
356c7306cc2SAmir Ayupov       MapEntry &Entry = Entries[I];
35716a497c6SRafael Auler       if (Entry.Key == VacantMarker)
35816a497c6SRafael Auler         continue;
35916a497c6SRafael Auler       if (Entry.Key & FollowUpTableMarker) {
3600cc19b56SDenis Revunov         MapEntry *Next =
3610cc19b56SDenis Revunov             reinterpret_cast<MapEntry *>(Entry.Key & ~FollowUpTableMarker);
3620cc19b56SDenis Revunov         assert(Next != Entries, "Circular reference!");
3630cc19b56SDenis Revunov         forEachElement(Callback, IncSize, Next, args...);
36416a497c6SRafael Auler         continue;
36516a497c6SRafael Auler       }
36616a497c6SRafael Auler       Callback(Entry, args...);
36716a497c6SRafael Auler     }
36816a497c6SRafael Auler   }
36916a497c6SRafael Auler 
37016a497c6SRafael Auler   MapEntry &firstAllocation(uint64_t Key, BumpPtrAllocator &Alloc) {
37116a497c6SRafael Auler     TableRoot = new (Alloc, 0) MapEntry[InitialSize];
372c7306cc2SAmir Ayupov     MapEntry &Entry = TableRoot[Key % InitialSize];
37316a497c6SRafael Auler     Entry.Key = Key;
37447934c11SDenis Revunov     // DEBUG(Entry.dump("Created root entry: "));
37516a497c6SRafael Auler     return Entry;
37616a497c6SRafael Auler   }
37716a497c6SRafael Auler 
37816a497c6SRafael Auler   MapEntry &getEntry(MapEntry *Entries, uint64_t Key, uint64_t Selector,
37916a497c6SRafael Auler                      BumpPtrAllocator &Alloc, int CurLevel) {
38047934c11SDenis Revunov     // DEBUG(reportNumber("getEntry called, level ", CurLevel, 10));
38116a497c6SRafael Auler     const uint32_t NumEntries = CurLevel == 0 ? InitialSize : IncSize;
38216a497c6SRafael Auler     uint64_t Remainder = Selector / NumEntries;
38316a497c6SRafael Auler     Selector = Selector % NumEntries;
384c7306cc2SAmir Ayupov     MapEntry &Entry = Entries[Selector];
38516a497c6SRafael Auler 
38616a497c6SRafael Auler     // A hit
38716a497c6SRafael Auler     if (Entry.Key == Key) {
38847934c11SDenis Revunov       // DEBUG(Entry.dump("Hit: "));
38916a497c6SRafael Auler       return Entry;
39016a497c6SRafael Auler     }
39116a497c6SRafael Auler 
39216a497c6SRafael Auler     // Vacant - add new entry
39316a497c6SRafael Auler     if (Entry.Key == VacantMarker) {
39416a497c6SRafael Auler       Entry.Key = Key;
39547934c11SDenis Revunov       // DEBUG(Entry.dump("Adding new entry: "));
39616a497c6SRafael Auler       return Entry;
39716a497c6SRafael Auler     }
39816a497c6SRafael Auler 
39916a497c6SRafael Auler     // Defer to the next level
40016a497c6SRafael Auler     if (Entry.Key & FollowUpTableMarker) {
40116a497c6SRafael Auler       return getEntry(
40216a497c6SRafael Auler           reinterpret_cast<MapEntry *>(Entry.Key & ~FollowUpTableMarker),
40316a497c6SRafael Auler           Key, Remainder, Alloc, CurLevel + 1);
40416a497c6SRafael Auler     }
40516a497c6SRafael Auler 
40616a497c6SRafael Auler     // Conflict - create the next level
40747934c11SDenis Revunov     // DEBUG(Entry.dump("Creating new level: "));
40847934c11SDenis Revunov 
40916a497c6SRafael Auler     MapEntry *NextLevelTbl = new (Alloc, 0) MapEntry[IncSize];
41047934c11SDenis Revunov     // DEBUG(
41147934c11SDenis Revunov     //     reportNumber("Newly allocated level: 0x", uint64_t(NextLevelTbl),
41247934c11SDenis Revunov     //     16));
41316a497c6SRafael Auler     uint64_t CurEntrySelector = Entry.Key / InitialSize;
41416a497c6SRafael Auler     for (int I = 0; I < CurLevel; ++I)
41516a497c6SRafael Auler       CurEntrySelector /= IncSize;
41616a497c6SRafael Auler     CurEntrySelector = CurEntrySelector % IncSize;
41716a497c6SRafael Auler     NextLevelTbl[CurEntrySelector] = Entry;
41816a497c6SRafael Auler     Entry.Key = reinterpret_cast<uint64_t>(NextLevelTbl) | FollowUpTableMarker;
419ad4e0770SDenis Revunov     assert((NextLevelTbl[CurEntrySelector].Key & ~FollowUpTableMarker) !=
420ad4e0770SDenis Revunov                uint64_t(Entries),
421ad4e0770SDenis Revunov            "circular reference created!\n");
42247934c11SDenis Revunov     // DEBUG(NextLevelTbl[CurEntrySelector].dump("New level entry: "));
42347934c11SDenis Revunov     // DEBUG(Entry.dump("Updated old entry: "));
42416a497c6SRafael Auler     return getEntry(NextLevelTbl, Key, Remainder, Alloc, CurLevel + 1);
42516a497c6SRafael Auler   }
4269aa134dcSVasily Leonenko 
4279aa134dcSVasily Leonenko   MapEntry &getOrAllocEntry(uint64_t Key, BumpPtrAllocator &Alloc) {
4280cc19b56SDenis Revunov     if (TableRoot) {
4290cc19b56SDenis Revunov       MapEntry &E = getEntry(TableRoot, Key, Key, Alloc, 0);
4300cc19b56SDenis Revunov       assert(!(E.Key & FollowUpTableMarker), "Invalid entry!");
4310cc19b56SDenis Revunov       return E;
4320cc19b56SDenis Revunov     }
4339aa134dcSVasily Leonenko     return firstAllocation(Key, Alloc);
4349aa134dcSVasily Leonenko   }
43516a497c6SRafael Auler };
43616a497c6SRafael Auler 
43716a497c6SRafael Auler template <typename T> void resetIndCallCounter(T &Entry) {
43816a497c6SRafael Auler   Entry.Val = 0;
43916a497c6SRafael Auler }
44016a497c6SRafael Auler 
44116a497c6SRafael Auler template <typename T, uint32_t X, uint32_t Y>
44216a497c6SRafael Auler void SimpleHashTable<T, X, Y>::resetCounters() {
44316a497c6SRafael Auler   forEachElement(resetIndCallCounter);
44416a497c6SRafael Auler }
44516a497c6SRafael Auler 
44616a497c6SRafael Auler /// Represents a hash table mapping a function target address to its counter.
44716a497c6SRafael Auler using IndirectCallHashTable = SimpleHashTable<>;
44816a497c6SRafael Auler 
44916a497c6SRafael Auler /// Initialize with number 1 instead of 0 so we don't go into .bss. This is the
45016a497c6SRafael Auler /// global array of all hash tables storing indirect call destinations observed
45116a497c6SRafael Auler /// during runtime, one table per call site.
45216a497c6SRafael Auler IndirectCallHashTable *GlobalIndCallCounters{
45316a497c6SRafael Auler     reinterpret_cast<IndirectCallHashTable *>(1)};
45416a497c6SRafael Auler 
45516a497c6SRafael Auler /// Don't allow reentrancy in the fdata writing phase - only one thread writes
45616a497c6SRafael Auler /// it
45716a497c6SRafael Auler Mutex *GlobalWriteProfileMutex{reinterpret_cast<Mutex *>(1)};
45816a497c6SRafael Auler 
45916a497c6SRafael Auler /// Store the number of calls in addition to the target address (Key) and the
46016a497c6SRafael Auler /// frequency as perceived by the basic block counter (Val).
46116a497c6SRafael Auler struct CallFlowEntryBase : public SimpleHashTableEntryBase {
46216a497c6SRafael Auler   uint64_t Calls;
46316a497c6SRafael Auler };
46416a497c6SRafael Auler 
46516a497c6SRafael Auler using CallFlowHashTableBase = SimpleHashTable<CallFlowEntryBase, 11939, 233>;
46616a497c6SRafael Auler 
46716a497c6SRafael Auler /// This is a large table indexing all possible call targets (indirect and
46816a497c6SRafael Auler /// direct ones). The goal is to find mismatches between number of calls (for
46916a497c6SRafael Auler /// those calls we were able to track) and the entry basic block counter of the
47016a497c6SRafael Auler /// callee. In most cases, these two should be equal. If not, there are two
47116a497c6SRafael Auler /// possible scenarios here:
47216a497c6SRafael Auler ///
47316a497c6SRafael Auler ///  * Entry BB has higher frequency than all known calls to this function.
47416a497c6SRafael Auler ///    In this case, we have dynamic library code or any uninstrumented code
47516a497c6SRafael Auler ///    calling this function. We will write the profile for these untracked
47616a497c6SRafael Auler ///    calls as having source "0 [unknown] 0" in the fdata file.
47716a497c6SRafael Auler ///
47816a497c6SRafael Auler ///  * Number of known calls is higher than the frequency of entry BB
47916a497c6SRafael Auler ///    This only happens when there is no counter for the entry BB / callee
48016a497c6SRafael Auler ///    function is not simple (in BOLT terms). We don't do anything special
48116a497c6SRafael Auler ///    here and just ignore those (we still report all calls to the non-simple
48216a497c6SRafael Auler ///    function, though).
48316a497c6SRafael Auler ///
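/// As a rough illustration (hypothetical function name and counts), an entry
/// basic block executed 42 times with no tracked callers would be reported
/// with an unknown source, along the lines of:
///   0 [unknown] 0 1 foo 0 0 42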
48416a497c6SRafael Auler class CallFlowHashTable : public CallFlowHashTableBase {
48516a497c6SRafael Auler public:
48616a497c6SRafael Auler   CallFlowHashTable(BumpPtrAllocator &Alloc) : Alloc(Alloc) {}
48716a497c6SRafael Auler 
48816a497c6SRafael Auler   MapEntry &get(uint64_t Key) { return CallFlowHashTableBase::get(Key, Alloc); }
48916a497c6SRafael Auler 
49016a497c6SRafael Auler private:
49116a497c6SRafael Auler   // Unlike the hash table for indirect call targets, we do store the
49216a497c6SRafael Auler   // allocator here since there is only one call flow hash and space overhead
49316a497c6SRafael Auler   // is negligible.
49416a497c6SRafael Auler   BumpPtrAllocator &Alloc;
49516a497c6SRafael Auler };
49616a497c6SRafael Auler 
49716a497c6SRafael Auler ///
49816a497c6SRafael Auler /// Description metadata emitted by BOLT to describe the program - refer to
49916a497c6SRafael Auler /// Passes/Instrumentation.cpp - Instrumentation::emitTablesAsELFNote()
50016a497c6SRafael Auler ///
50116a497c6SRafael Auler struct Location {
50216a497c6SRafael Auler   uint32_t FunctionName;
50316a497c6SRafael Auler   uint32_t Offset;
50416a497c6SRafael Auler };
50516a497c6SRafael Auler 
50616a497c6SRafael Auler struct CallDescription {
50716a497c6SRafael Auler   Location From;
50816a497c6SRafael Auler   uint32_t FromNode;
50916a497c6SRafael Auler   Location To;
51016a497c6SRafael Auler   uint32_t Counter;
51116a497c6SRafael Auler   uint64_t TargetAddress;
51216a497c6SRafael Auler };
51316a497c6SRafael Auler 
51416a497c6SRafael Auler using IndCallDescription = Location;
51516a497c6SRafael Auler 
51616a497c6SRafael Auler struct IndCallTargetDescription {
51716a497c6SRafael Auler   Location Loc;
51816a497c6SRafael Auler   uint64_t Address;
51916a497c6SRafael Auler };
52016a497c6SRafael Auler 
52116a497c6SRafael Auler struct EdgeDescription {
52216a497c6SRafael Auler   Location From;
52316a497c6SRafael Auler   uint32_t FromNode;
52416a497c6SRafael Auler   Location To;
52516a497c6SRafael Auler   uint32_t ToNode;
52616a497c6SRafael Auler   uint32_t Counter;
52716a497c6SRafael Auler };
52816a497c6SRafael Auler 
52916a497c6SRafael Auler struct InstrumentedNode {
53016a497c6SRafael Auler   uint32_t Node;
53116a497c6SRafael Auler   uint32_t Counter;
53216a497c6SRafael Auler };
53316a497c6SRafael Auler 
53416a497c6SRafael Auler struct EntryNode {
53516a497c6SRafael Auler   uint64_t Node;
53616a497c6SRafael Auler   uint64_t Address;
53716a497c6SRafael Auler };
53816a497c6SRafael Auler 
53916a497c6SRafael Auler struct FunctionDescription {
54016a497c6SRafael Auler   uint32_t NumLeafNodes;
54116a497c6SRafael Auler   const InstrumentedNode *LeafNodes;
54216a497c6SRafael Auler   uint32_t NumEdges;
54316a497c6SRafael Auler   const EdgeDescription *Edges;
54416a497c6SRafael Auler   uint32_t NumCalls;
54516a497c6SRafael Auler   const CallDescription *Calls;
54616a497c6SRafael Auler   uint32_t NumEntryNodes;
54716a497c6SRafael Auler   const EntryNode *EntryNodes;
54816a497c6SRafael Auler 
54916a497c6SRafael Auler   /// Constructor will parse the serialized function metadata written by BOLT
55016a497c6SRafael Auler   FunctionDescription(const uint8_t *FuncDesc);
55116a497c6SRafael Auler 
55216a497c6SRafael Auler   uint64_t getSize() const {
55316a497c6SRafael Auler     return 16 + NumLeafNodes * sizeof(InstrumentedNode) +
55416a497c6SRafael Auler            NumEdges * sizeof(EdgeDescription) +
55516a497c6SRafael Auler            NumCalls * sizeof(CallDescription) +
55616a497c6SRafael Auler            NumEntryNodes * sizeof(EntryNode);
55716a497c6SRafael Auler   }
55816a497c6SRafael Auler };
55916a497c6SRafael Auler 
56016a497c6SRafael Auler /// The context is created when the fdata profile needs to be written to disk
56116a497c6SRafael Auler /// and we need to interpret our runtime counters. It contains pointers to the
56216a497c6SRafael Auler /// mmapped binary (only the BOLT-written metadata section). Deserialization
56316a497c6SRafael Auler /// should be straightforward as most data is POD or an array of POD elements.
56416a497c6SRafael Auler /// This metadata is used to reconstruct function CFGs.
56516a497c6SRafael Auler struct ProfileWriterContext {
56616a497c6SRafael Auler   IndCallDescription *IndCallDescriptions;
56716a497c6SRafael Auler   IndCallTargetDescription *IndCallTargets;
56816a497c6SRafael Auler   uint8_t *FuncDescriptions;
56916a497c6SRafael Auler   char *Strings;  // String table with function names used in this binary
57016a497c6SRafael Auler   int FileDesc;   // File descriptor for the file on disk backing this
57116a497c6SRafael Auler                   // information in memory via mmap
57216a497c6SRafael Auler   void *MMapPtr;  // The mmap ptr
57316a497c6SRafael Auler   int MMapSize;   // The mmap size
57416a497c6SRafael Auler 
57516a497c6SRafael Auler   /// Hash table storing all possible call destinations to detect untracked
57616a497c6SRafael Auler   /// calls and correctly report them as [unknown] in output fdata.
57716a497c6SRafael Auler   CallFlowHashTable *CallFlowTable;
57816a497c6SRafael Auler 
57916a497c6SRafael Auler   /// Lookup the sorted indirect call target vector to fetch function name and
58016a497c6SRafael Auler   /// offset for an arbitrary function pointer.
58116a497c6SRafael Auler   const IndCallTargetDescription *lookupIndCallTarget(uint64_t Target) const;
58216a497c6SRafael Auler };
58316a497c6SRafael Auler 
58416a497c6SRafael Auler /// Performs a string comparison and returns zero if Str1 matches Str2. Compares
58516a497c6SRafael Auler /// at most Size characters.
586cc4b2fb6SRafael Auler int compareStr(const char *Str1, const char *Str2, int Size) {
587821480d2SRafael Auler   while (*Str1 == *Str2) {
588821480d2SRafael Auler     if (*Str1 == '\0' || --Size == 0)
589821480d2SRafael Auler       return 0;
590821480d2SRafael Auler     ++Str1;
591821480d2SRafael Auler     ++Str2;
592821480d2SRafael Auler   }
593821480d2SRafael Auler   return 1;
594821480d2SRafael Auler }
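// For example, compareStr("abc", "abcd", 3) returns 0 (the first three
// characters match), while compareStr("abc", "abd", 3) returns 1.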
595821480d2SRafael Auler 
59616a497c6SRafael Auler /// Output Location to the fdata file
59716a497c6SRafael Auler char *serializeLoc(const ProfileWriterContext &Ctx, char *OutBuf,
598cc4b2fb6SRafael Auler                    const Location Loc, uint32_t BufSize) {
599821480d2SRafael Auler   // fdata location format: Type Name Offset
600821480d2SRafael Auler   // Type 1 - regular symbol
601821480d2SRafael Auler   OutBuf = strCopy(OutBuf, "1 ");
60216a497c6SRafael Auler   const char *Str = Ctx.Strings + Loc.FunctionName;
603cc4b2fb6SRafael Auler   uint32_t Size = 25;
60462aa74f8SRafael Auler   while (*Str) {
60562aa74f8SRafael Auler     *OutBuf++ = *Str++;
606cc4b2fb6SRafael Auler     if (++Size >= BufSize)
607cc4b2fb6SRafael Auler       break;
60862aa74f8SRafael Auler   }
609cc4b2fb6SRafael Auler   assert(!*Str, "buffer overflow, function name too large");
61062aa74f8SRafael Auler   *OutBuf++ = ' ';
611821480d2SRafael Auler   OutBuf = intToStr(OutBuf, Loc.Offset, 16);
61262aa74f8SRafael Auler   *OutBuf++ = ' ';
61362aa74f8SRafael Auler   return OutBuf;
61462aa74f8SRafael Auler }
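// For instance, a Location pointing at a (hypothetical) function "foo" with
// Offset == 0xa0 is serialized by the function above as the fragment
// "1 foo a0 " (a trailing space separates it from the next fdata field).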
61562aa74f8SRafael Auler 
61616a497c6SRafael Auler /// Read and deserialize a function description written by BOLT. \p FuncDesc
61716a497c6SRafael Auler /// points at the beginning of the function metadata structure in the file.
61816a497c6SRafael Auler /// See Instrumentation::emitTablesAsELFNote()
61916a497c6SRafael Auler FunctionDescription::FunctionDescription(const uint8_t *FuncDesc) {
62016a497c6SRafael Auler   NumLeafNodes = *reinterpret_cast<const uint32_t *>(FuncDesc);
62116a497c6SRafael Auler   DEBUG(reportNumber("NumLeafNodes = ", NumLeafNodes, 10));
62216a497c6SRafael Auler   LeafNodes = reinterpret_cast<const InstrumentedNode *>(FuncDesc + 4);
62316a497c6SRafael Auler 
62416a497c6SRafael Auler   NumEdges = *reinterpret_cast<const uint32_t *>(
62516a497c6SRafael Auler       FuncDesc + 4 + NumLeafNodes * sizeof(InstrumentedNode));
62616a497c6SRafael Auler   DEBUG(reportNumber("NumEdges = ", NumEdges, 10));
62716a497c6SRafael Auler   Edges = reinterpret_cast<const EdgeDescription *>(
62816a497c6SRafael Auler       FuncDesc + 8 + NumLeafNodes * sizeof(InstrumentedNode));
62916a497c6SRafael Auler 
63016a497c6SRafael Auler   NumCalls = *reinterpret_cast<const uint32_t *>(
63116a497c6SRafael Auler       FuncDesc + 8 + NumLeafNodes * sizeof(InstrumentedNode) +
63216a497c6SRafael Auler       NumEdges * sizeof(EdgeDescription));
63316a497c6SRafael Auler   DEBUG(reportNumber("NumCalls = ", NumCalls, 10));
63416a497c6SRafael Auler   Calls = reinterpret_cast<const CallDescription *>(
63516a497c6SRafael Auler       FuncDesc + 12 + NumLeafNodes * sizeof(InstrumentedNode) +
63616a497c6SRafael Auler       NumEdges * sizeof(EdgeDescription));
63716a497c6SRafael Auler   NumEntryNodes = *reinterpret_cast<const uint32_t *>(
63816a497c6SRafael Auler       FuncDesc + 12 + NumLeafNodes * sizeof(InstrumentedNode) +
63916a497c6SRafael Auler       NumEdges * sizeof(EdgeDescription) + NumCalls * sizeof(CallDescription));
64016a497c6SRafael Auler   DEBUG(reportNumber("NumEntryNodes = ", NumEntryNodes, 10));
64116a497c6SRafael Auler   EntryNodes = reinterpret_cast<const EntryNode *>(
64216a497c6SRafael Auler       FuncDesc + 16 + NumLeafNodes * sizeof(InstrumentedNode) +
64316a497c6SRafael Auler       NumEdges * sizeof(EdgeDescription) + NumCalls * sizeof(CallDescription));
64416a497c6SRafael Auler }
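// Summary of the serialized layout parsed above, where each count is a
// uint32_t immediately followed by its array:
//
//   [NumLeafNodes][LeafNodes...][NumEdges][Edges...]
//   [NumCalls][Calls...][NumEntryNodes][EntryNodes...]
//
// The four counts account for the constant 16 bytes added in getSize().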
64516a497c6SRafael Auler 
64616a497c6SRafael Auler /// Read and mmap descriptions written by BOLT from the executable's notes
64716a497c6SRafael Auler /// section
648a0dd5b05SAlexander Shaposhnikov #if defined(HAVE_ELF_H) and !defined(__APPLE__)
6492ffd6e2bSElvina Yakubova 
6502ffd6e2bSElvina Yakubova void *__attribute__((noinline)) __get_pc() {
6512ffd6e2bSElvina Yakubova   return __builtin_extract_return_addr(__builtin_return_address(0));
6522ffd6e2bSElvina Yakubova }
6532ffd6e2bSElvina Yakubova 
6542ffd6e2bSElvina Yakubova /// Parse a string of the form <hex1>-<hex2> into the pair <StartAddress, EndAddress>
6552ffd6e2bSElvina Yakubova bool parseAddressRange(const char *Str, uint64_t &StartAddress,
6562ffd6e2bSElvina Yakubova                        uint64_t &EndAddress) {
6572ffd6e2bSElvina Yakubova   if (!Str)
6582ffd6e2bSElvina Yakubova     return false;
6592ffd6e2bSElvina Yakubova   // Parsed string format: <hex1>-<hex2>
6602ffd6e2bSElvina Yakubova   StartAddress = hexToLong(Str, '-');
6612ffd6e2bSElvina Yakubova   while (*Str && *Str != '-')
6622ffd6e2bSElvina Yakubova     ++Str;
6632ffd6e2bSElvina Yakubova   if (!*Str)
6642ffd6e2bSElvina Yakubova     return false;
6652ffd6e2bSElvina Yakubova   ++Str; // swallow '-'
6662ffd6e2bSElvina Yakubova   EndAddress = hexToLong(Str);
6672ffd6e2bSElvina Yakubova   return true;
6682ffd6e2bSElvina Yakubova }
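// Entries under /proc/self/map_files/ are named "<start>-<end>" in hex, e.g. a
// (hypothetical) "55f2a1e00000-55f2a1e45000", which parseAddressRange() splits
// into StartAddress and EndAddress.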
6692ffd6e2bSElvina Yakubova 
6702ffd6e2bSElvina Yakubova /// Get the full path to the real binary by getting the current virtual address
6712ffd6e2bSElvina Yakubova /// and searching /proc/self/map_files for the link whose address range
6722ffd6e2bSElvina Yakubova /// contains it
6732ffd6e2bSElvina Yakubova static char *getBinaryPath() {
6742ffd6e2bSElvina Yakubova   const uint32_t BufSize = 1024;
67546bc197dSMarius Wachtler   const uint32_t NameMax = 4096;
6762ffd6e2bSElvina Yakubova   const char DirPath[] = "/proc/self/map_files/";
6772ffd6e2bSElvina Yakubova   static char TargetPath[NameMax] = {};
6782ffd6e2bSElvina Yakubova   char Buf[BufSize];
6792ffd6e2bSElvina Yakubova 
680519cbbaaSVasily Leonenko   if (__bolt_instr_binpath[0] != '\0')
681519cbbaaSVasily Leonenko     return __bolt_instr_binpath;
682519cbbaaSVasily Leonenko 
6832ffd6e2bSElvina Yakubova   if (TargetPath[0] != '\0')
6842ffd6e2bSElvina Yakubova     return TargetPath;
6852ffd6e2bSElvina Yakubova 
6862ffd6e2bSElvina Yakubova   unsigned long CurAddr = (unsigned long)__get_pc();
68760bbddf3SDenis Revunov   uint64_t FDdir = __open(DirPath, O_RDONLY,
688821480d2SRafael Auler                           /*mode=*/0666);
6893b00a3a2SMarius Wachtler   assert(static_cast<int64_t>(FDdir) >= 0,
6902ffd6e2bSElvina Yakubova          "failed to open /proc/self/map_files");
6912ffd6e2bSElvina Yakubova 
6922ffd6e2bSElvina Yakubova   while (long Nread = __getdents(FDdir, (struct dirent *)Buf, BufSize)) {
6932ffd6e2bSElvina Yakubova     assert(static_cast<int64_t>(Nread) != -1, "failed to get folder entries");
6942ffd6e2bSElvina Yakubova 
6952ffd6e2bSElvina Yakubova     struct dirent *d;
6962ffd6e2bSElvina Yakubova     for (long Bpos = 0; Bpos < Nread; Bpos += d->d_reclen) {
6972ffd6e2bSElvina Yakubova       d = (struct dirent *)(Buf + Bpos);
6982ffd6e2bSElvina Yakubova 
6992ffd6e2bSElvina Yakubova       uint64_t StartAddress, EndAddress;
7002ffd6e2bSElvina Yakubova       if (!parseAddressRange(d->d_name, StartAddress, EndAddress))
7012ffd6e2bSElvina Yakubova         continue;
7022ffd6e2bSElvina Yakubova       if (CurAddr < StartAddress || CurAddr > EndAddress)
7032ffd6e2bSElvina Yakubova         continue;
7042ffd6e2bSElvina Yakubova       char FindBuf[NameMax];
7052ffd6e2bSElvina Yakubova       char *C = strCopy(FindBuf, DirPath, NameMax);
7062ffd6e2bSElvina Yakubova       C = strCopy(C, d->d_name, NameMax - (C - FindBuf));
7072ffd6e2bSElvina Yakubova       *C = '\0';
7082ffd6e2bSElvina Yakubova       uint32_t Ret = __readlink(FindBuf, TargetPath, sizeof(TargetPath));
7092ffd6e2bSElvina Yakubova       assert(Ret != -1 && Ret != BufSize, "readlink error");
7102ffd6e2bSElvina Yakubova       TargetPath[Ret] = '\0';
7112ffd6e2bSElvina Yakubova       return TargetPath;
7122ffd6e2bSElvina Yakubova     }
7132ffd6e2bSElvina Yakubova   }
7142ffd6e2bSElvina Yakubova   return nullptr;
7152ffd6e2bSElvina Yakubova }
7162ffd6e2bSElvina Yakubova 
7172ffd6e2bSElvina Yakubova ProfileWriterContext readDescriptions() {
7182ffd6e2bSElvina Yakubova   ProfileWriterContext Result;
7192ffd6e2bSElvina Yakubova   char *BinPath = getBinaryPath();
7202ffd6e2bSElvina Yakubova   assert(BinPath && BinPath[0] != '\0', "failed to find binary path");
7212ffd6e2bSElvina Yakubova 
72260bbddf3SDenis Revunov   uint64_t FD = __open(BinPath, O_RDONLY,
7232ffd6e2bSElvina Yakubova                        /*mode=*/0666);
7243b00a3a2SMarius Wachtler   assert(static_cast<int64_t>(FD) >= 0, "failed to open binary path");
7252ffd6e2bSElvina Yakubova 
726821480d2SRafael Auler   Result.FileDesc = FD;
727821480d2SRafael Auler 
728821480d2SRafael Auler   // mmap our binary to memory
72960bbddf3SDenis Revunov   uint64_t Size = __lseek(FD, 0, SEEK_END);
730821480d2SRafael Auler   uint8_t *BinContents = reinterpret_cast<uint8_t *>(
7318b23a853SDenis Revunov       __mmap(0, Size, PROT_READ, MAP_PRIVATE, FD, 0));
7328ed172cfSDenis Revunov   assert(BinContents != MAP_FAILED, "readDescriptions: Failed to mmap self!");
733821480d2SRafael Auler   Result.MMapPtr = BinContents;
734821480d2SRafael Auler   Result.MMapSize = Size;
735821480d2SRafael Auler   Elf64_Ehdr *Hdr = reinterpret_cast<Elf64_Ehdr *>(BinContents);
736821480d2SRafael Auler   Elf64_Shdr *Shdr = reinterpret_cast<Elf64_Shdr *>(BinContents + Hdr->e_shoff);
737821480d2SRafael Auler   Elf64_Shdr *StringTblHeader = reinterpret_cast<Elf64_Shdr *>(
738821480d2SRafael Auler       BinContents + Hdr->e_shoff + Hdr->e_shstrndx * Hdr->e_shentsize);
739821480d2SRafael Auler 
740821480d2SRafael Auler   // Find .bolt.instr.tables with the data we need and set pointers to it
741821480d2SRafael Auler   for (int I = 0; I < Hdr->e_shnum; ++I) {
742821480d2SRafael Auler     char *SecName = reinterpret_cast<char *>(
743821480d2SRafael Auler         BinContents + StringTblHeader->sh_offset + Shdr->sh_name);
744821480d2SRafael Auler     if (compareStr(SecName, ".bolt.instr.tables", 64) != 0) {
745821480d2SRafael Auler       Shdr = reinterpret_cast<Elf64_Shdr *>(BinContents + Hdr->e_shoff +
746821480d2SRafael Auler                                             (I + 1) * Hdr->e_shentsize);
747821480d2SRafael Auler       continue;
748821480d2SRafael Auler     }
749821480d2SRafael Auler     // Actual contents of the ELF note start after offset 20 decimal:
750821480d2SRafael Auler     // Offset 0: Producer name size (4 bytes)
751821480d2SRafael Auler     // Offset 4: Contents size (4 bytes)
752821480d2SRafael Auler     // Offset 8: Note type (4 bytes)
753821480d2SRafael Auler     // Offset 12: Producer name (BOLT\0) (5 bytes + align to 4-byte boundary)
754821480d2SRafael Auler     // Offset 20: Contents
75516a497c6SRafael Auler     uint32_t IndCallDescSize =
756cc4b2fb6SRafael Auler         *reinterpret_cast<uint32_t *>(BinContents + Shdr->sh_offset + 20);
75716a497c6SRafael Auler     uint32_t IndCallTargetDescSize = *reinterpret_cast<uint32_t *>(
75816a497c6SRafael Auler         BinContents + Shdr->sh_offset + 24 + IndCallDescSize);
75916a497c6SRafael Auler     uint32_t FuncDescSize =
76016a497c6SRafael Auler         *reinterpret_cast<uint32_t *>(BinContents + Shdr->sh_offset + 28 +
76116a497c6SRafael Auler                                       IndCallDescSize + IndCallTargetDescSize);
76216a497c6SRafael Auler     Result.IndCallDescriptions = reinterpret_cast<IndCallDescription *>(
76316a497c6SRafael Auler         BinContents + Shdr->sh_offset + 24);
76416a497c6SRafael Auler     Result.IndCallTargets = reinterpret_cast<IndCallTargetDescription *>(
76516a497c6SRafael Auler         BinContents + Shdr->sh_offset + 28 + IndCallDescSize);
76616a497c6SRafael Auler     Result.FuncDescriptions = BinContents + Shdr->sh_offset + 32 +
76716a497c6SRafael Auler                               IndCallDescSize + IndCallTargetDescSize;
76816a497c6SRafael Auler     Result.Strings = reinterpret_cast<char *>(
76916a497c6SRafael Auler         BinContents + Shdr->sh_offset + 32 + IndCallDescSize +
77016a497c6SRafael Auler         IndCallTargetDescSize + FuncDescSize);
771821480d2SRafael Auler     return Result;
772821480d2SRafael Auler   }
773821480d2SRafael Auler   const char ErrMsg[] =
774821480d2SRafael Auler       "BOLT instrumentation runtime error: could not find section "
775821480d2SRafael Auler       ".bolt.instr.tables\n";
776821480d2SRafael Auler   reportError(ErrMsg, sizeof(ErrMsg));
777821480d2SRafael Auler   return Result;
778821480d2SRafael Auler }
779a0dd5b05SAlexander Shaposhnikov 
780ba31344fSRafael Auler #else
781a0dd5b05SAlexander Shaposhnikov 
78216a497c6SRafael Auler ProfileWriterContext readDescriptions() {
78316a497c6SRafael Auler   ProfileWriterContext Result;
784a0dd5b05SAlexander Shaposhnikov   uint8_t *Tables = _bolt_instr_tables_getter();
785a0dd5b05SAlexander Shaposhnikov   uint32_t IndCallDescSize = *reinterpret_cast<uint32_t *>(Tables);
786a0dd5b05SAlexander Shaposhnikov   uint32_t IndCallTargetDescSize =
787a0dd5b05SAlexander Shaposhnikov       *reinterpret_cast<uint32_t *>(Tables + 4 + IndCallDescSize);
788a0dd5b05SAlexander Shaposhnikov   uint32_t FuncDescSize = *reinterpret_cast<uint32_t *>(
789a0dd5b05SAlexander Shaposhnikov       Tables + 8 + IndCallDescSize + IndCallTargetDescSize);
790a0dd5b05SAlexander Shaposhnikov   Result.IndCallDescriptions =
791a0dd5b05SAlexander Shaposhnikov       reinterpret_cast<IndCallDescription *>(Tables + 4);
792a0dd5b05SAlexander Shaposhnikov   Result.IndCallTargets = reinterpret_cast<IndCallTargetDescription *>(
793a0dd5b05SAlexander Shaposhnikov       Tables + 8 + IndCallDescSize);
794a0dd5b05SAlexander Shaposhnikov   Result.FuncDescriptions =
795a0dd5b05SAlexander Shaposhnikov       Tables + 12 + IndCallDescSize + IndCallTargetDescSize;
796a0dd5b05SAlexander Shaposhnikov   Result.Strings = reinterpret_cast<char *>(
797a0dd5b05SAlexander Shaposhnikov       Tables + 12 + IndCallDescSize + IndCallTargetDescSize + FuncDescSize);
798ba31344fSRafael Auler   return Result;
799ba31344fSRafael Auler }
800a0dd5b05SAlexander Shaposhnikov 
801ba31344fSRafael Auler #endif
802821480d2SRafael Auler 
803a0dd5b05SAlexander Shaposhnikov #if !defined(__APPLE__)
80416a497c6SRafael Auler /// Debug helper that prints overall metadata global numbers to check they are sane
80516a497c6SRafael Auler void printStats(const ProfileWriterContext &Ctx) {
806cc4b2fb6SRafael Auler   char StatMsg[BufSize];
807cc4b2fb6SRafael Auler   char *StatPtr = StatMsg;
80816a497c6SRafael Auler   StatPtr =
80916a497c6SRafael Auler       strCopy(StatPtr,
81016a497c6SRafael Auler               "\nBOLT INSTRUMENTATION RUNTIME STATISTICS\n\nIndCallDescSize: ");
811cc4b2fb6SRafael Auler   StatPtr = intToStr(StatPtr,
81216a497c6SRafael Auler                      Ctx.FuncDescriptions -
81316a497c6SRafael Auler                          reinterpret_cast<uint8_t *>(Ctx.IndCallDescriptions),
814cc4b2fb6SRafael Auler                      10);
815cc4b2fb6SRafael Auler   StatPtr = strCopy(StatPtr, "\nFuncDescSize: ");
816cc4b2fb6SRafael Auler   StatPtr = intToStr(
817cc4b2fb6SRafael Auler       StatPtr,
81816a497c6SRafael Auler       reinterpret_cast<uint8_t *>(Ctx.Strings) - Ctx.FuncDescriptions, 10);
81916a497c6SRafael Auler   StatPtr = strCopy(StatPtr, "\n__bolt_instr_num_ind_calls: ");
82016a497c6SRafael Auler   StatPtr = intToStr(StatPtr, __bolt_instr_num_ind_calls, 10);
821cc4b2fb6SRafael Auler   StatPtr = strCopy(StatPtr, "\n__bolt_instr_num_funcs: ");
822cc4b2fb6SRafael Auler   StatPtr = intToStr(StatPtr, __bolt_instr_num_funcs, 10);
823cc4b2fb6SRafael Auler   StatPtr = strCopy(StatPtr, "\n");
824cc4b2fb6SRafael Auler   __write(2, StatMsg, StatPtr - StatMsg);
825cc4b2fb6SRafael Auler }
826a0dd5b05SAlexander Shaposhnikov #endif
827a0dd5b05SAlexander Shaposhnikov 
828cc4b2fb6SRafael Auler 
829cc4b2fb6SRafael Auler /// This is part of a simple CFG representation in memory, where we store
830cc4b2fb6SRafael Auler /// a dynamically sized array of input and output edges per node, and store
831cc4b2fb6SRafael Auler /// a dynamically sized array of nodes per graph. We also store the spanning
832cc4b2fb6SRafael Auler /// tree edges for that CFG in a separate array of nodes in
833cc4b2fb6SRafael Auler /// \p SpanningTreeNodes, while the regular nodes live in \p CFGNodes.
834cc4b2fb6SRafael Auler struct Edge {
835cc4b2fb6SRafael Auler   uint32_t Node; // Index in the nodes array of the destination of this edge
836cc4b2fb6SRafael Auler   uint32_t ID;   // Edge index in an array comprising all edges of the graph
837cc4b2fb6SRafael Auler };
838cc4b2fb6SRafael Auler 
839cc4b2fb6SRafael Auler /// A regular graph node or a spanning tree node
840cc4b2fb6SRafael Auler struct Node {
841cc4b2fb6SRafael Auler   uint32_t NumInEdges{0};  // Input edge count used to size InEdge
842cc4b2fb6SRafael Auler   uint32_t NumOutEdges{0}; // Output edge count used to size OutEdges
843cc4b2fb6SRafael Auler   Edge *InEdges{nullptr};  // Created and managed by \p Graph
844cc4b2fb6SRafael Auler   Edge *OutEdges{nullptr}; // ditto
845cc4b2fb6SRafael Auler };
846cc4b2fb6SRafael Auler 
847cc4b2fb6SRafael Auler /// Main class for CFG representation in memory. Manages object creation and
848cc4b2fb6SRafael Auler /// destruction, populates an array of CFG nodes as well as corresponding
849cc4b2fb6SRafael Auler /// spanning tree nodes.
850cc4b2fb6SRafael Auler struct Graph {
851cc4b2fb6SRafael Auler   uint32_t NumNodes;
852cc4b2fb6SRafael Auler   Node *CFGNodes;
853cc4b2fb6SRafael Auler   Node *SpanningTreeNodes;
85416a497c6SRafael Auler   uint64_t *EdgeFreqs;
85516a497c6SRafael Auler   uint64_t *CallFreqs;
856cc4b2fb6SRafael Auler   BumpPtrAllocator &Alloc;
85716a497c6SRafael Auler   const FunctionDescription &D;
858cc4b2fb6SRafael Auler 
85916a497c6SRafael Auler   /// Reads a list of edges from function description \p D and builds
860cc4b2fb6SRafael Auler   /// the graph from it. Allocates several internal dynamic structures that are
86116a497c6SRafael Auler   /// later destroyed by ~Graph() and uses \p Alloc. D.LeafNodes contains all
862cc4b2fb6SRafael Auler   /// spanning tree leaf node descriptions (their counters). They are the seed
863cc4b2fb6SRafael Auler   /// used to compute the rest of the missing edge counts in a bottom-up
864cc4b2fb6SRafael Auler   /// traversal of the spanning tree.
86516a497c6SRafael Auler   Graph(BumpPtrAllocator &Alloc, const FunctionDescription &D,
86616a497c6SRafael Auler         const uint64_t *Counters, ProfileWriterContext &Ctx);
867cc4b2fb6SRafael Auler   ~Graph();
868cc4b2fb6SRafael Auler   void dump() const;
86916a497c6SRafael Auler 
87016a497c6SRafael Auler private:
87116a497c6SRafael Auler   void computeEdgeFrequencies(const uint64_t *Counters,
87216a497c6SRafael Auler                               ProfileWriterContext &Ctx);
87316a497c6SRafael Auler   void dumpEdgeFreqs() const;
874cc4b2fb6SRafael Auler };
875cc4b2fb6SRafael Auler 
87616a497c6SRafael Auler Graph::Graph(BumpPtrAllocator &Alloc, const FunctionDescription &D,
87716a497c6SRafael Auler              const uint64_t *Counters, ProfileWriterContext &Ctx)
87816a497c6SRafael Auler     : Alloc(Alloc), D(D) {
879cc4b2fb6SRafael Auler   DEBUG(reportNumber("G = 0x", (uint64_t)this, 16));
880cc4b2fb6SRafael Auler   // First pass to determine number of nodes
88116a497c6SRafael Auler   int32_t MaxNodes = -1;
88216a497c6SRafael Auler   CallFreqs = nullptr;
88316a497c6SRafael Auler   EdgeFreqs = nullptr;
88416a497c6SRafael Auler   for (int I = 0; I < D.NumEdges; ++I) {
88516a497c6SRafael Auler     if (static_cast<int32_t>(D.Edges[I].FromNode) > MaxNodes)
88616a497c6SRafael Auler       MaxNodes = D.Edges[I].FromNode;
88716a497c6SRafael Auler     if (static_cast<int32_t>(D.Edges[I].ToNode) > MaxNodes)
88816a497c6SRafael Auler       MaxNodes = D.Edges[I].ToNode;
889cc4b2fb6SRafael Auler   }
890a0dd5b05SAlexander Shaposhnikov 
891883bf0e8SAmir Ayupov   for (int I = 0; I < D.NumLeafNodes; ++I)
89216a497c6SRafael Auler     if (static_cast<int32_t>(D.LeafNodes[I].Node) > MaxNodes)
89316a497c6SRafael Auler       MaxNodes = D.LeafNodes[I].Node;
894883bf0e8SAmir Ayupov 
895883bf0e8SAmir Ayupov   for (int I = 0; I < D.NumCalls; ++I)
89616a497c6SRafael Auler     if (static_cast<int32_t>(D.Calls[I].FromNode) > MaxNodes)
89716a497c6SRafael Auler       MaxNodes = D.Calls[I].FromNode;
898883bf0e8SAmir Ayupov 
89916a497c6SRafael Auler   // No nodes? Nothing to do
90016a497c6SRafael Auler   if (MaxNodes < 0) {
90116a497c6SRafael Auler     DEBUG(report("No nodes!\n"));
902cc4b2fb6SRafael Auler     CFGNodes = nullptr;
903cc4b2fb6SRafael Auler     SpanningTreeNodes = nullptr;
904cc4b2fb6SRafael Auler     NumNodes = 0;
905cc4b2fb6SRafael Auler     return;
906cc4b2fb6SRafael Auler   }
907cc4b2fb6SRafael Auler   ++MaxNodes;
908cc4b2fb6SRafael Auler   DEBUG(reportNumber("NumNodes = ", MaxNodes, 10));
90916a497c6SRafael Auler   NumNodes = static_cast<uint32_t>(MaxNodes);
910cc4b2fb6SRafael Auler 
911cc4b2fb6SRafael Auler   // Initial allocations
912cc4b2fb6SRafael Auler   CFGNodes = new (Alloc) Node[MaxNodes];
913a0dd5b05SAlexander Shaposhnikov 
914cc4b2fb6SRafael Auler   DEBUG(reportNumber("G->CFGNodes = 0x", (uint64_t)CFGNodes, 16));
915cc4b2fb6SRafael Auler   SpanningTreeNodes = new (Alloc) Node[MaxNodes];
916cc4b2fb6SRafael Auler   DEBUG(reportNumber("G->SpanningTreeNodes = 0x",
917cc4b2fb6SRafael Auler                      (uint64_t)SpanningTreeNodes, 16));
918cc4b2fb6SRafael Auler 
919cc4b2fb6SRafael Auler   // Figure out how much to allocate to each vector (in/out edge sets)
92016a497c6SRafael Auler   for (int I = 0; I < D.NumEdges; ++I) {
92116a497c6SRafael Auler     CFGNodes[D.Edges[I].FromNode].NumOutEdges++;
92216a497c6SRafael Auler     CFGNodes[D.Edges[I].ToNode].NumInEdges++;
92316a497c6SRafael Auler     if (D.Edges[I].Counter != 0xffffffff)
924cc4b2fb6SRafael Auler       continue;
925cc4b2fb6SRafael Auler 
92616a497c6SRafael Auler     SpanningTreeNodes[D.Edges[I].FromNode].NumOutEdges++;
92716a497c6SRafael Auler     SpanningTreeNodes[D.Edges[I].ToNode].NumInEdges++;
928cc4b2fb6SRafael Auler   }
929cc4b2fb6SRafael Auler 
930cc4b2fb6SRafael Auler   // Allocate in/out edge sets
931cc4b2fb6SRafael Auler   for (int I = 0; I < MaxNodes; ++I) {
932cc4b2fb6SRafael Auler     if (CFGNodes[I].NumInEdges > 0)
933cc4b2fb6SRafael Auler       CFGNodes[I].InEdges = new (Alloc) Edge[CFGNodes[I].NumInEdges];
934cc4b2fb6SRafael Auler     if (CFGNodes[I].NumOutEdges > 0)
935cc4b2fb6SRafael Auler       CFGNodes[I].OutEdges = new (Alloc) Edge[CFGNodes[I].NumOutEdges];
936cc4b2fb6SRafael Auler     if (SpanningTreeNodes[I].NumInEdges > 0)
937cc4b2fb6SRafael Auler       SpanningTreeNodes[I].InEdges =
938cc4b2fb6SRafael Auler           new (Alloc) Edge[SpanningTreeNodes[I].NumInEdges];
939cc4b2fb6SRafael Auler     if (SpanningTreeNodes[I].NumOutEdges > 0)
940cc4b2fb6SRafael Auler       SpanningTreeNodes[I].OutEdges =
941cc4b2fb6SRafael Auler           new (Alloc) Edge[SpanningTreeNodes[I].NumOutEdges];
942cc4b2fb6SRafael Auler     CFGNodes[I].NumInEdges = 0;
943cc4b2fb6SRafael Auler     CFGNodes[I].NumOutEdges = 0;
944cc4b2fb6SRafael Auler     SpanningTreeNodes[I].NumInEdges = 0;
945cc4b2fb6SRafael Auler     SpanningTreeNodes[I].NumOutEdges = 0;
946cc4b2fb6SRafael Auler   }
947cc4b2fb6SRafael Auler 
948cc4b2fb6SRafael Auler   // Fill in/out edge sets
94916a497c6SRafael Auler   for (int I = 0; I < D.NumEdges; ++I) {
95016a497c6SRafael Auler     const uint32_t Src = D.Edges[I].FromNode;
95116a497c6SRafael Auler     const uint32_t Dst = D.Edges[I].ToNode;
952cc4b2fb6SRafael Auler     Edge *E = &CFGNodes[Src].OutEdges[CFGNodes[Src].NumOutEdges++];
953cc4b2fb6SRafael Auler     E->Node = Dst;
954cc4b2fb6SRafael Auler     E->ID = I;
955cc4b2fb6SRafael Auler 
956cc4b2fb6SRafael Auler     E = &CFGNodes[Dst].InEdges[CFGNodes[Dst].NumInEdges++];
957cc4b2fb6SRafael Auler     E->Node = Src;
958cc4b2fb6SRafael Auler     E->ID = I;
959cc4b2fb6SRafael Auler 
96016a497c6SRafael Auler     if (D.Edges[I].Counter != 0xffffffff)
961cc4b2fb6SRafael Auler       continue;
962cc4b2fb6SRafael Auler 
963cc4b2fb6SRafael Auler     E = &SpanningTreeNodes[Src]
964cc4b2fb6SRafael Auler              .OutEdges[SpanningTreeNodes[Src].NumOutEdges++];
965cc4b2fb6SRafael Auler     E->Node = Dst;
966cc4b2fb6SRafael Auler     E->ID = I;
967cc4b2fb6SRafael Auler 
968cc4b2fb6SRafael Auler     E = &SpanningTreeNodes[Dst]
969cc4b2fb6SRafael Auler              .InEdges[SpanningTreeNodes[Dst].NumInEdges++];
970cc4b2fb6SRafael Auler     E->Node = Src;
971cc4b2fb6SRafael Auler     E->ID = I;
972cc4b2fb6SRafael Auler   }
97316a497c6SRafael Auler 
97416a497c6SRafael Auler   computeEdgeFrequencies(Counters, Ctx);
975cc4b2fb6SRafael Auler }
976cc4b2fb6SRafael Auler 
977cc4b2fb6SRafael Auler Graph::~Graph() {
97816a497c6SRafael Auler   if (CallFreqs)
97916a497c6SRafael Auler     Alloc.deallocate(CallFreqs);
98016a497c6SRafael Auler   if (EdgeFreqs)
98116a497c6SRafael Auler     Alloc.deallocate(EdgeFreqs);
982cc4b2fb6SRafael Auler   for (int I = NumNodes - 1; I >= 0; --I) {
983cc4b2fb6SRafael Auler     if (SpanningTreeNodes[I].OutEdges)
984cc4b2fb6SRafael Auler       Alloc.deallocate(SpanningTreeNodes[I].OutEdges);
985cc4b2fb6SRafael Auler     if (SpanningTreeNodes[I].InEdges)
986cc4b2fb6SRafael Auler       Alloc.deallocate(SpanningTreeNodes[I].InEdges);
987cc4b2fb6SRafael Auler     if (CFGNodes[I].OutEdges)
988cc4b2fb6SRafael Auler       Alloc.deallocate(CFGNodes[I].OutEdges);
989cc4b2fb6SRafael Auler     if (CFGNodes[I].InEdges)
990cc4b2fb6SRafael Auler       Alloc.deallocate(CFGNodes[I].InEdges);
991cc4b2fb6SRafael Auler   }
992cc4b2fb6SRafael Auler   if (SpanningTreeNodes)
993cc4b2fb6SRafael Auler     Alloc.deallocate(SpanningTreeNodes);
994cc4b2fb6SRafael Auler   if (CFGNodes)
995cc4b2fb6SRafael Auler     Alloc.deallocate(CFGNodes);
996cc4b2fb6SRafael Auler }
997cc4b2fb6SRafael Auler 
998cc4b2fb6SRafael Auler void Graph::dump() const {
999cc4b2fb6SRafael Auler   reportNumber("Dumping graph with number of nodes: ", NumNodes, 10);
1000cc4b2fb6SRafael Auler   report("  Full graph:\n");
1001cc4b2fb6SRafael Auler   for (int I = 0; I < NumNodes; ++I) {
1002cc4b2fb6SRafael Auler     const Node *N = &CFGNodes[I];
1003cc4b2fb6SRafael Auler     reportNumber("    Node #", I, 10);
1004cc4b2fb6SRafael Auler     reportNumber("      InEdges total ", N->NumInEdges, 10);
1005cc4b2fb6SRafael Auler     for (int J = 0; J < N->NumInEdges; ++J)
1006cc4b2fb6SRafael Auler       reportNumber("        ", N->InEdges[J].Node, 10);
1007cc4b2fb6SRafael Auler     reportNumber("      OutEdges total ", N->NumOutEdges, 10);
1008cc4b2fb6SRafael Auler     for (int J = 0; J < N->NumOutEdges; ++J)
1009cc4b2fb6SRafael Auler       reportNumber("        ", N->OutEdges[J].Node, 10);
1010cc4b2fb6SRafael Auler     report("\n");
1011cc4b2fb6SRafael Auler   }
1012cc4b2fb6SRafael Auler   report("  Spanning tree:\n");
1013cc4b2fb6SRafael Auler   for (int I = 0; I < NumNodes; ++I) {
1014cc4b2fb6SRafael Auler     const Node *N = &SpanningTreeNodes[I];
1015cc4b2fb6SRafael Auler     reportNumber("    Node #", I, 10);
1016cc4b2fb6SRafael Auler     reportNumber("      InEdges total ", N->NumInEdges, 10);
1017cc4b2fb6SRafael Auler     for (int J = 0; J < N->NumInEdges; ++J)
1018cc4b2fb6SRafael Auler       reportNumber("        ", N->InEdges[J].Node, 10);
1019cc4b2fb6SRafael Auler     reportNumber("      OutEdges total ", N->NumOutEdges, 10);
1020cc4b2fb6SRafael Auler     for (int J = 0; J < N->NumOutEdges; ++J)
1021cc4b2fb6SRafael Auler       reportNumber("        ", N->OutEdges[J].Node, 10);
1022cc4b2fb6SRafael Auler     report("\n");
1023cc4b2fb6SRafael Auler   }
1024cc4b2fb6SRafael Auler }
1025cc4b2fb6SRafael Auler 
102616a497c6SRafael Auler void Graph::dumpEdgeFreqs() const {
102716a497c6SRafael Auler   reportNumber(
102816a497c6SRafael Auler       "Dumping edge frequencies for graph with num edges: ", D.NumEdges, 10);
102916a497c6SRafael Auler   for (int I = 0; I < D.NumEdges; ++I) {
103016a497c6SRafael Auler     reportNumber("* Src: ", D.Edges[I].FromNode, 10);
103116a497c6SRafael Auler     reportNumber("  Dst: ", D.Edges[I].ToNode, 10);
1032cc4b2fb6SRafael Auler     reportNumber("    Cnt: ", EdgeFreqs[I], 10);
1033cc4b2fb6SRafael Auler   }
1034cc4b2fb6SRafael Auler }
1035cc4b2fb6SRafael Auler 
103616a497c6SRafael Auler /// Auxiliary map structure for fast lookup of which calls belong to each node
103716a497c6SRafael Auler /// of the function CFG.
103816a497c6SRafael Auler struct NodeToCallsMap {
103916a497c6SRafael Auler   struct MapEntry {
104016a497c6SRafael Auler     uint32_t NumCalls;
104116a497c6SRafael Auler     uint32_t *Calls;
104216a497c6SRafael Auler   };
104316a497c6SRafael Auler   MapEntry *Entries;
104416a497c6SRafael Auler   BumpPtrAllocator &Alloc;
104516a497c6SRafael Auler   const uint32_t NumNodes;
1046cc4b2fb6SRafael Auler 
104716a497c6SRafael Auler   NodeToCallsMap(BumpPtrAllocator &Alloc, const FunctionDescription &D,
104816a497c6SRafael Auler                  uint32_t NumNodes)
104916a497c6SRafael Auler       : Alloc(Alloc), NumNodes(NumNodes) {
105016a497c6SRafael Auler     Entries = new (Alloc, 0) MapEntry[NumNodes];
105116a497c6SRafael Auler     for (int I = 0; I < D.NumCalls; ++I) {
105216a497c6SRafael Auler       DEBUG(reportNumber("Registering call in node ", D.Calls[I].FromNode, 10));
105316a497c6SRafael Auler       ++Entries[D.Calls[I].FromNode].NumCalls;
105416a497c6SRafael Auler     }
105516a497c6SRafael Auler     for (int I = 0; I < NumNodes; ++I) {
105616a497c6SRafael Auler       Entries[I].Calls = Entries[I].NumCalls ? new (Alloc)
105716a497c6SRafael Auler                                                    uint32_t[Entries[I].NumCalls]
105816a497c6SRafael Auler                                              : nullptr;
105916a497c6SRafael Auler       Entries[I].NumCalls = 0;
106016a497c6SRafael Auler     }
106116a497c6SRafael Auler     for (int I = 0; I < D.NumCalls; ++I) {
1062c7306cc2SAmir Ayupov       MapEntry &Entry = Entries[D.Calls[I].FromNode];
106316a497c6SRafael Auler       Entry.Calls[Entry.NumCalls++] = I;
106416a497c6SRafael Auler     }
106516a497c6SRafael Auler   }
106616a497c6SRafael Auler 
106716a497c6SRafael Auler   /// Set the frequency of all calls in node \p NodeID to \p Freq. However, if
106816a497c6SRafael Auler   /// a call has its own counter and does not depend on the basic block counter,
106916a497c6SRafael Auler   /// it means the call has a landing pad and can throw exceptions. In that
107016a497c6SRafael Auler   /// case, set its frequency from its own counter and return the maximum value
107116a497c6SRafael Auler   /// observed across such counters. The caller uses this as the new frequency
107216a497c6SRafael Auler   /// at basic block entry, which fixes the CFG edge frequencies in the presence
107316a497c6SRafael Auler   /// of exceptions.
107416a497c6SRafael Auler   uint64_t visitAllCallsIn(uint32_t NodeID, uint64_t Freq, uint64_t *CallFreqs,
107516a497c6SRafael Auler                            const FunctionDescription &D,
107616a497c6SRafael Auler                            const uint64_t *Counters,
107716a497c6SRafael Auler                            ProfileWriterContext &Ctx) const {
1078c7306cc2SAmir Ayupov     const MapEntry &Entry = Entries[NodeID];
107916a497c6SRafael Auler     uint64_t MaxValue = 0ull;
108016a497c6SRafael Auler     for (int I = 0, E = Entry.NumCalls; I != E; ++I) {
1081c7306cc2SAmir Ayupov       const uint32_t CallID = Entry.Calls[I];
108216a497c6SRafael Auler       DEBUG(reportNumber("  Setting freq for call ID: ", CallID, 10));
1083c7306cc2SAmir Ayupov       const CallDescription &CallDesc = D.Calls[CallID];
108416a497c6SRafael Auler       if (CallDesc.Counter == 0xffffffff) {
108516a497c6SRafael Auler         CallFreqs[CallID] = Freq;
108616a497c6SRafael Auler         DEBUG(reportNumber("  with : ", Freq, 10));
108716a497c6SRafael Auler       } else {
1088c7306cc2SAmir Ayupov         const uint64_t CounterVal = Counters[CallDesc.Counter];
108916a497c6SRafael Auler         CallFreqs[CallID] = CounterVal;
109016a497c6SRafael Auler         MaxValue = CounterVal > MaxValue ? CounterVal : MaxValue;
109116a497c6SRafael Auler         DEBUG(reportNumber("  with (private counter) : ", CounterVal, 10));
109216a497c6SRafael Auler       }
109316a497c6SRafael Auler       DEBUG(reportNumber("  Address: 0x", CallDesc.TargetAddress, 16));
109416a497c6SRafael Auler       if (CallFreqs[CallID] > 0)
109516a497c6SRafael Auler         Ctx.CallFlowTable->get(CallDesc.TargetAddress).Calls +=
109616a497c6SRafael Auler             CallFreqs[CallID];
109716a497c6SRafael Auler     }
109816a497c6SRafael Auler     return MaxValue;
109916a497c6SRafael Auler   }
110016a497c6SRafael Auler 
110116a497c6SRafael Auler   ~NodeToCallsMap() {
1102883bf0e8SAmir Ayupov     for (int I = NumNodes - 1; I >= 0; --I)
110316a497c6SRafael Auler       if (Entries[I].Calls)
110416a497c6SRafael Auler         Alloc.deallocate(Entries[I].Calls);
110516a497c6SRafael Auler     Alloc.deallocate(Entries);
110616a497c6SRafael Auler   }
110716a497c6SRafael Auler };
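// Illustrative usage sketch (not compiled into the runtime): this is how
// Graph::computeEdgeFrequencies() below drives the map; NodeID and NodeFreq
// are hypothetical values for one spanning tree node.
//
//   NodeToCallsMap *CallMap = new (Alloc) NodeToCallsMap(Alloc, D, NumNodes);
//   const uint64_t CallFreq = CallMap->visitAllCallsIn(NodeID, NodeFreq,
//                                                      CallFreqs, D, Counters,
//                                                      Ctx);
//   CallMap->~NodeToCallsMap();
//   Alloc.deallocate(CallMap);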
110816a497c6SRafael Auler 
110916a497c6SRafael Auler /// Fill an array with the frequency of each edge in the function represented
111016a497c6SRafael Auler /// by this Graph, as well as another array with the frequency of each call.
111116a497c6SRafael Auler void Graph::computeEdgeFrequencies(const uint64_t *Counters,
111216a497c6SRafael Auler                                    ProfileWriterContext &Ctx) {
111316a497c6SRafael Auler   if (NumNodes == 0)
111416a497c6SRafael Auler     return;
111516a497c6SRafael Auler 
111616a497c6SRafael Auler   EdgeFreqs = D.NumEdges ? new (Alloc, 0) uint64_t[D.NumEdges] : nullptr;
111716a497c6SRafael Auler   CallFreqs = D.NumCalls ? new (Alloc, 0) uint64_t[D.NumCalls] : nullptr;
111816a497c6SRafael Auler 
111916a497c6SRafael Auler   // Set up a lookup for the calls present in each node (basic block)
112016a497c6SRafael Auler   NodeToCallsMap *CallMap = new (Alloc) NodeToCallsMap(Alloc, D, NumNodes);
1121cc4b2fb6SRafael Auler 
1122cc4b2fb6SRafael Auler   // Perform a bottom-up, post-order traversal of the spanning tree using an
1123cc4b2fb6SRafael Auler   // explicit stack. Spanning tree edges have no explicit counters; we infer
1124cc4b2fb6SRafael Auler   // each one as the sum of its node's outgoing edge counts minus the sum of
1125cc4b2fb6SRafael Auler   // the already-resolved incoming edge counts (see the worked example below).
112616a497c6SRafael Auler   uint32_t *Stack = new (Alloc) uint32_t[NumNodes];
1127cc4b2fb6SRafael Auler   uint32_t StackTop = 0;
1128cc4b2fb6SRafael Auler   enum Status : uint8_t { S_NEW = 0, S_VISITING, S_VISITED };
112916a497c6SRafael Auler   Status *Visited = new (Alloc, 0) Status[NumNodes];
113016a497c6SRafael Auler   uint64_t *LeafFrequency = new (Alloc, 0) uint64_t[NumNodes];
113116a497c6SRafael Auler   uint64_t *EntryAddress = new (Alloc, 0) uint64_t[NumNodes];
1132cc4b2fb6SRafael Auler 
1133cc4b2fb6SRafael Auler   // Set up a fast lookup for the frequency of leaf nodes, which have special
1134cc4b2fb6SRafael Auler   // basic block frequency instrumentation (they are not edge profiled).
113516a497c6SRafael Auler   for (int I = 0; I < D.NumLeafNodes; ++I) {
113616a497c6SRafael Auler     LeafFrequency[D.LeafNodes[I].Node] = Counters[D.LeafNodes[I].Counter];
1137cc4b2fb6SRafael Auler     DEBUG({
113816a497c6SRafael Auler       if (Counters[D.LeafNodes[I].Counter] > 0) {
113916a497c6SRafael Auler         reportNumber("Leaf Node# ", D.LeafNodes[I].Node, 10);
114016a497c6SRafael Auler         reportNumber("     Counter: ", Counters[D.LeafNodes[I].Counter], 10);
1141cc4b2fb6SRafael Auler       }
1142cc4b2fb6SRafael Auler     });
114316a497c6SRafael Auler   }
114416a497c6SRafael Auler   for (int I = 0; I < D.NumEntryNodes; ++I) {
114516a497c6SRafael Auler     EntryAddress[D.EntryNodes[I].Node] = D.EntryNodes[I].Address;
114616a497c6SRafael Auler     DEBUG({
114716a497c6SRafael Auler       reportNumber("Entry Node# ", D.EntryNodes[I].Node, 10);
114816a497c6SRafael Auler       reportNumber("      Address: ", D.EntryNodes[I].Address, 16);
114916a497c6SRafael Auler     });
1150cc4b2fb6SRafael Auler   }
1151cc4b2fb6SRafael Auler   // Add all root nodes to the stack
1152883bf0e8SAmir Ayupov   for (int I = 0; I < NumNodes; ++I)
115316a497c6SRafael Auler     if (SpanningTreeNodes[I].NumInEdges == 0)
1154cc4b2fb6SRafael Auler       Stack[StackTop++] = I;
1155883bf0e8SAmir Ayupov 
1156cc4b2fb6SRafael Auler   // Empty stack?
1157cc4b2fb6SRafael Auler   if (StackTop == 0) {
115816a497c6SRafael Auler     DEBUG(report("Empty stack!\n"));
115916a497c6SRafael Auler     Alloc.deallocate(EntryAddress);
1160cc4b2fb6SRafael Auler     Alloc.deallocate(LeafFrequency);
1161cc4b2fb6SRafael Auler     Alloc.deallocate(Visited);
1162cc4b2fb6SRafael Auler     Alloc.deallocate(Stack);
116316a497c6SRafael Auler     CallMap->~NodeToCallsMap();
116416a497c6SRafael Auler     Alloc.deallocate(CallMap);
116516a497c6SRafael Auler     if (CallFreqs)
116616a497c6SRafael Auler       Alloc.deallocate(CallFreqs);
116716a497c6SRafael Auler     if (EdgeFreqs)
116816a497c6SRafael Auler       Alloc.deallocate(EdgeFreqs);
116916a497c6SRafael Auler     EdgeFreqs = nullptr;
117016a497c6SRafael Auler     CallFreqs = nullptr;
117116a497c6SRafael Auler     return;
1172cc4b2fb6SRafael Auler   }
1173cc4b2fb6SRafael Auler   // Add all known edge counts; the rest will be inferred below
117416a497c6SRafael Auler   for (int I = 0; I < D.NumEdges; ++I) {
117516a497c6SRafael Auler     const uint32_t C = D.Edges[I].Counter;
1176cc4b2fb6SRafael Auler     if (C == 0xffffffff) // inferred counter - we will compute its value
1177cc4b2fb6SRafael Auler       continue;
117816a497c6SRafael Auler     EdgeFreqs[I] = Counters[C];
1179cc4b2fb6SRafael Auler   }
1180cc4b2fb6SRafael Auler 
1181cc4b2fb6SRafael Auler   while (StackTop > 0) {
1182cc4b2fb6SRafael Auler     const uint32_t Cur = Stack[--StackTop];
1183cc4b2fb6SRafael Auler     DEBUG({
1184cc4b2fb6SRafael Auler       if (Visited[Cur] == S_VISITING)
1185cc4b2fb6SRafael Auler         report("(visiting) ");
1186cc4b2fb6SRafael Auler       else
1187cc4b2fb6SRafael Auler         report("(new) ");
1188cc4b2fb6SRafael Auler       reportNumber("Cur: ", Cur, 10);
1189cc4b2fb6SRafael Auler     });
1190cc4b2fb6SRafael Auler 
1191cc4b2fb6SRafael Auler     // This shouldn't happen in a tree
1192cc4b2fb6SRafael Auler     assert(Visited[Cur] != S_VISITED, "should not have visited nodes in stack");
1193cc4b2fb6SRafael Auler     if (Visited[Cur] == S_NEW) {
1194cc4b2fb6SRafael Auler       Visited[Cur] = S_VISITING;
1195cc4b2fb6SRafael Auler       Stack[StackTop++] = Cur;
119616a497c6SRafael Auler       assert(StackTop <= NumNodes, "stack grew too large");
119716a497c6SRafael Auler       for (int I = 0, E = SpanningTreeNodes[Cur].NumOutEdges; I < E; ++I) {
119816a497c6SRafael Auler         const uint32_t Succ = SpanningTreeNodes[Cur].OutEdges[I].Node;
1199cc4b2fb6SRafael Auler         Stack[StackTop++] = Succ;
120016a497c6SRafael Auler         assert(StackTop <= NumNodes, "stack grew too large");
1201cc4b2fb6SRafael Auler       }
1202cc4b2fb6SRafael Auler       continue;
1203cc4b2fb6SRafael Auler     }
1204cc4b2fb6SRafael Auler     Visited[Cur] = S_VISITED;
1205cc4b2fb6SRafael Auler 
1206cc4b2fb6SRafael Auler     // Establish our node frequency based on outgoing edges, which should all be
1207cc4b2fb6SRafael Auler     // resolved by now.
1208cc4b2fb6SRafael Auler     int64_t CurNodeFreq = LeafFrequency[Cur];
1209cc4b2fb6SRafael Auler     // Not a leaf?
1210cc4b2fb6SRafael Auler     if (!CurNodeFreq) {
121116a497c6SRafael Auler       for (int I = 0, E = CFGNodes[Cur].NumOutEdges; I != E; ++I) {
121216a497c6SRafael Auler         const uint32_t SuccEdge = CFGNodes[Cur].OutEdges[I].ID;
121316a497c6SRafael Auler         CurNodeFreq += EdgeFreqs[SuccEdge];
1214cc4b2fb6SRafael Auler       }
1215cc4b2fb6SRafael Auler     }
121616a497c6SRafael Auler     if (CurNodeFreq < 0)
121716a497c6SRafael Auler       CurNodeFreq = 0;
121816a497c6SRafael Auler 
121916a497c6SRafael Auler     const uint64_t CallFreq = CallMap->visitAllCallsIn(
122016a497c6SRafael Auler         Cur, CurNodeFreq > 0 ? CurNodeFreq : 0, CallFreqs, D, Counters, Ctx);
122116a497c6SRafael Auler 
122216a497c6SRafael Auler     // Exception handling affected our output flow? Fix with calls info
122316a497c6SRafael Auler     DEBUG({
122416a497c6SRafael Auler       if (CallFreq > CurNodeFreq)
122516a497c6SRafael Auler         report("Bumping node frequency with call info\n");
122616a497c6SRafael Auler     });
122716a497c6SRafael Auler     CurNodeFreq = CallFreq > CurNodeFreq ? CallFreq : CurNodeFreq;
122816a497c6SRafael Auler 
122916a497c6SRafael Auler     if (CurNodeFreq > 0) {
123016a497c6SRafael Auler       if (uint64_t Addr = EntryAddress[Cur]) {
123116a497c6SRafael Auler         DEBUG(
123216a497c6SRafael Auler             reportNumber("  Setting flow at entry point address 0x", Addr, 16));
123316a497c6SRafael Auler         DEBUG(reportNumber("  with: ", CurNodeFreq, 10));
123416a497c6SRafael Auler         Ctx.CallFlowTable->get(Addr).Val = CurNodeFreq;
123516a497c6SRafael Auler       }
123616a497c6SRafael Auler     }
123716a497c6SRafael Auler 
123816a497c6SRafael Auler     // No parent? We reached a tree root; only the call frequency update above applies.
1239883bf0e8SAmir Ayupov     if (SpanningTreeNodes[Cur].NumInEdges == 0)
124016a497c6SRafael Auler       continue;
124116a497c6SRafael Auler 
124216a497c6SRafael Auler     assert(SpanningTreeNodes[Cur].NumInEdges == 1, "must have 1 parent");
124316a497c6SRafael Auler     const uint32_t Parent = SpanningTreeNodes[Cur].InEdges[0].Node;
124416a497c6SRafael Auler     const uint32_t ParentEdge = SpanningTreeNodes[Cur].InEdges[0].ID;
124516a497c6SRafael Auler 
1246cc4b2fb6SRafael Auler     // Calculate parent edge freq.
124716a497c6SRafael Auler     int64_t ParentEdgeFreq = CurNodeFreq;
124816a497c6SRafael Auler     for (int I = 0, E = CFGNodes[Cur].NumInEdges; I != E; ++I) {
124916a497c6SRafael Auler       const uint32_t PredEdge = CFGNodes[Cur].InEdges[I].ID;
125016a497c6SRafael Auler       ParentEdgeFreq -= EdgeFreqs[PredEdge];
1251cc4b2fb6SRafael Auler     }
125216a497c6SRafael Auler 
1253cc4b2fb6SRafael Auler     // Sometimes the conservative CFG that BOLT builds will lead to incorrect
1254cc4b2fb6SRafael Auler     // flow computation. For example, in a BB that transitively calls the exit
1255cc4b2fb6SRafael Auler     // syscall, BOLT will add a fall-through successor even though it should not
1256cc4b2fb6SRafael Auler     // have any successors. So this block execution will likely be wrong. We
1257cc4b2fb6SRafael Auler     // tolerate this imperfection since this case should be quite infrequent.
1258cc4b2fb6SRafael Auler     if (ParentEdgeFreq < 0) {
125916a497c6SRafael Auler       DEBUG(dumpEdgeFreqs());
1260cc4b2fb6SRafael Auler       DEBUG(report("WARNING: incorrect flow"));
1261cc4b2fb6SRafael Auler       ParentEdgeFreq = 0;
1262cc4b2fb6SRafael Auler     }
1263cc4b2fb6SRafael Auler     DEBUG(reportNumber("  Setting freq for ParentEdge: ", ParentEdge, 10));
1264cc4b2fb6SRafael Auler     DEBUG(reportNumber("  with ParentEdgeFreq: ", ParentEdgeFreq, 10));
126516a497c6SRafael Auler     EdgeFreqs[ParentEdge] = ParentEdgeFreq;
1266cc4b2fb6SRafael Auler   }
1267cc4b2fb6SRafael Auler 
126816a497c6SRafael Auler   Alloc.deallocate(EntryAddress);
1269cc4b2fb6SRafael Auler   Alloc.deallocate(LeafFrequency);
1270cc4b2fb6SRafael Auler   Alloc.deallocate(Visited);
1271cc4b2fb6SRafael Auler   Alloc.deallocate(Stack);
127216a497c6SRafael Auler   CallMap->~NodeToCallsMap();
127316a497c6SRafael Auler   Alloc.deallocate(CallMap);
127416a497c6SRafael Auler   DEBUG(dumpEdgeFreqs());
1275cc4b2fb6SRafael Auler }
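// Worked example (hypothetical counts): suppose node N has two instrumented
// outgoing CFG edges with counters 70 and 30, one instrumented incoming edge
// with counter 60, and one incoming spanning tree edge from its parent (not
// instrumented, so still 0 in EdgeFreqs). When N is popped the second time:
//   CurNodeFreq    = 70 + 30      = 100   (sum of outgoing edge counts)
//   ParentEdgeFreq = 100 - 60 - 0 = 40    (flow conservation at N)
// and the uninstrumented parent edge is assigned a count of 40.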
1276cc4b2fb6SRafael Auler 
127716a497c6SRafael Auler /// Write to \p FD all of the edge profiles for function \p FuncDesc. Uses
127816a497c6SRafael Auler /// \p Alloc to allocate helper dynamic structures used to compute the profile
127916a497c6SRafael Auler /// for edges that we do not explicitly instrument.
128016a497c6SRafael Auler const uint8_t *writeFunctionProfile(int FD, ProfileWriterContext &Ctx,
128116a497c6SRafael Auler                                     const uint8_t *FuncDesc,
128216a497c6SRafael Auler                                     BumpPtrAllocator &Alloc) {
128316a497c6SRafael Auler   const FunctionDescription F(FuncDesc);
128416a497c6SRafael Auler   const uint8_t *next = FuncDesc + F.getSize();
1285cc4b2fb6SRafael Auler 
1286a0dd5b05SAlexander Shaposhnikov #if !defined(__APPLE__)
1287a0dd5b05SAlexander Shaposhnikov   uint64_t *bolt_instr_locations = __bolt_instr_locations;
1288a0dd5b05SAlexander Shaposhnikov #else
1289a0dd5b05SAlexander Shaposhnikov   uint64_t *bolt_instr_locations = _bolt_instr_locations_getter();
1290a0dd5b05SAlexander Shaposhnikov #endif
1291a0dd5b05SAlexander Shaposhnikov 
1292cc4b2fb6SRafael Auler   // Skip funcs we know are cold
1293cc4b2fb6SRafael Auler #ifndef ENABLE_DEBUG
129416a497c6SRafael Auler   uint64_t CountersFreq = 0;
1295883bf0e8SAmir Ayupov   for (int I = 0; I < F.NumLeafNodes; ++I)
1296a0dd5b05SAlexander Shaposhnikov     CountersFreq += bolt_instr_locations[F.LeafNodes[I].Counter];
1297883bf0e8SAmir Ayupov 
129816a497c6SRafael Auler   if (CountersFreq == 0) {
129916a497c6SRafael Auler     for (int I = 0; I < F.NumEdges; ++I) {
130016a497c6SRafael Auler       const uint32_t C = F.Edges[I].Counter;
130116a497c6SRafael Auler       if (C == 0xffffffff)
130216a497c6SRafael Auler         continue;
1303a0dd5b05SAlexander Shaposhnikov       CountersFreq += bolt_instr_locations[C];
130416a497c6SRafael Auler     }
130516a497c6SRafael Auler     if (CountersFreq == 0) {
130616a497c6SRafael Auler       for (int I = 0; I < F.NumCalls; ++I) {
130716a497c6SRafael Auler         const uint32_t C = F.Calls[I].Counter;
130816a497c6SRafael Auler         if (C == 0xffffffff)
130916a497c6SRafael Auler           continue;
1310a0dd5b05SAlexander Shaposhnikov         CountersFreq += bolt_instr_locations[C];
131116a497c6SRafael Auler       }
131216a497c6SRafael Auler       if (CountersFreq == 0)
1313cc4b2fb6SRafael Auler         return next;
131416a497c6SRafael Auler     }
131516a497c6SRafael Auler   }
1316cc4b2fb6SRafael Auler #endif
1317cc4b2fb6SRafael Auler 
1318a0dd5b05SAlexander Shaposhnikov   Graph *G = new (Alloc) Graph(Alloc, F, bolt_instr_locations, Ctx);
1319cc4b2fb6SRafael Auler   DEBUG(G->dump());
1320a0dd5b05SAlexander Shaposhnikov 
132116a497c6SRafael Auler   if (!G->EdgeFreqs && !G->CallFreqs) {
1322cc4b2fb6SRafael Auler     G->~Graph();
1323cc4b2fb6SRafael Auler     Alloc.deallocate(G);
1324cc4b2fb6SRafael Auler     return next;
1325cc4b2fb6SRafael Auler   }
1326cc4b2fb6SRafael Auler 
132716a497c6SRafael Auler   for (int I = 0; I < F.NumEdges; ++I) {
132816a497c6SRafael Auler     const uint64_t Freq = G->EdgeFreqs[I];
1329cc4b2fb6SRafael Auler     if (Freq == 0)
1330cc4b2fb6SRafael Auler       continue;
133116a497c6SRafael Auler     const EdgeDescription *Desc = &F.Edges[I];
1332cc4b2fb6SRafael Auler     char LineBuf[BufSize];
1333cc4b2fb6SRafael Auler     char *Ptr = LineBuf;
133416a497c6SRafael Auler     Ptr = serializeLoc(Ctx, Ptr, Desc->From, BufSize);
133516a497c6SRafael Auler     Ptr = serializeLoc(Ctx, Ptr, Desc->To, BufSize - (Ptr - LineBuf));
1336cc4b2fb6SRafael Auler     Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 22);
1337cc4b2fb6SRafael Auler     Ptr = intToStr(Ptr, Freq, 10);
1338cc4b2fb6SRafael Auler     *Ptr++ = '\n';
1339cc4b2fb6SRafael Auler     __write(FD, LineBuf, Ptr - LineBuf);
1340cc4b2fb6SRafael Auler   }
1341cc4b2fb6SRafael Auler 
134216a497c6SRafael Auler   for (int I = 0; I < F.NumCalls; ++I) {
134316a497c6SRafael Auler     const uint64_t Freq = G->CallFreqs[I];
134416a497c6SRafael Auler     if (Freq == 0)
134516a497c6SRafael Auler       continue;
134616a497c6SRafael Auler     char LineBuf[BufSize];
134716a497c6SRafael Auler     char *Ptr = LineBuf;
134816a497c6SRafael Auler     const CallDescription *Desc = &F.Calls[I];
134916a497c6SRafael Auler     Ptr = serializeLoc(Ctx, Ptr, Desc->From, BufSize);
135016a497c6SRafael Auler     Ptr = serializeLoc(Ctx, Ptr, Desc->To, BufSize - (Ptr - LineBuf));
135116a497c6SRafael Auler     Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
135216a497c6SRafael Auler     Ptr = intToStr(Ptr, Freq, 10);
135316a497c6SRafael Auler     *Ptr++ = '\n';
135416a497c6SRafael Auler     __write(FD, LineBuf, Ptr - LineBuf);
135516a497c6SRafael Auler   }
135616a497c6SRafael Auler 
1357cc4b2fb6SRafael Auler   G->~Graph();
1358cc4b2fb6SRafael Auler   Alloc.deallocate(G);
1359cc4b2fb6SRafael Auler   return next;
1360cc4b2fb6SRafael Auler }
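// Output sketch (hypothetical values): every edge or call with a non-zero
// count becomes one fdata line consisting of two serialized locations, a
// misprediction field (always "0" here, since instrumentation cannot measure
// mispredictions) and the execution count, e.g. roughly:
//   1 foo 1a 1 foo 2f 0 42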
1361cc4b2fb6SRafael Auler 
1362a0dd5b05SAlexander Shaposhnikov #if !defined(__APPLE__)
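/// Binary-search the IndCallTargets table for the entry whose address equals
/// \p Target; the search assumes the table is sorted by address. Returns
/// nullptr if \p Target is not a known function entry.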
136316a497c6SRafael Auler const IndCallTargetDescription *
136416a497c6SRafael Auler ProfileWriterContext::lookupIndCallTarget(uint64_t Target) const {
136516a497c6SRafael Auler   uint32_t B = 0;
136616a497c6SRafael Auler   uint32_t E = __bolt_instr_num_ind_targets;
136716a497c6SRafael Auler   if (E == 0)
136816a497c6SRafael Auler     return nullptr;
136916a497c6SRafael Auler   do {
137016a497c6SRafael Auler     uint32_t I = (E - B) / 2 + B;
137116a497c6SRafael Auler     if (IndCallTargets[I].Address == Target)
137216a497c6SRafael Auler       return &IndCallTargets[I];
137316a497c6SRafael Auler     if (IndCallTargets[I].Address < Target)
137416a497c6SRafael Auler       B = I + 1;
137516a497c6SRafael Auler     else
137616a497c6SRafael Auler       E = I;
137716a497c6SRafael Auler   } while (B < E);
137816a497c6SRafael Auler   return nullptr;
1379cc4b2fb6SRafael Auler }
138062aa74f8SRafael Auler 
138116a497c6SRafael Auler /// Write a single indirect call <src, target> pair to the fdata file
138216a497c6SRafael Auler void visitIndCallCounter(IndirectCallHashTable::MapEntry &Entry,
138316a497c6SRafael Auler                          int FD, int CallsiteID,
138416a497c6SRafael Auler                          ProfileWriterContext *Ctx) {
138516a497c6SRafael Auler   if (Entry.Val == 0)
138616a497c6SRafael Auler     return;
138716a497c6SRafael Auler   DEBUG(reportNumber("Target func 0x", Entry.Key, 16));
138816a497c6SRafael Auler   DEBUG(reportNumber("Target freq: ", Entry.Val, 10));
138916a497c6SRafael Auler   const IndCallDescription *CallsiteDesc =
139016a497c6SRafael Auler       &Ctx->IndCallDescriptions[CallsiteID];
139116a497c6SRafael Auler   const IndCallTargetDescription *TargetDesc =
139216a497c6SRafael Auler       Ctx->lookupIndCallTarget(Entry.Key);
139316a497c6SRafael Auler   if (!TargetDesc) {
139416a497c6SRafael Auler     DEBUG(report("Failed to lookup indirect call target\n"));
1395cc4b2fb6SRafael Auler     char LineBuf[BufSize];
139662aa74f8SRafael Auler     char *Ptr = LineBuf;
139716a497c6SRafael Auler     Ptr = serializeLoc(*Ctx, Ptr, *CallsiteDesc, BufSize);
139816a497c6SRafael Auler     Ptr = strCopy(Ptr, "0 [unknown] 0 0 ", BufSize - (Ptr - LineBuf) - 40);
139916a497c6SRafael Auler     Ptr = intToStr(Ptr, Entry.Val, 10);
140016a497c6SRafael Auler     *Ptr++ = '\n';
140116a497c6SRafael Auler     __write(FD, LineBuf, Ptr - LineBuf);
140216a497c6SRafael Auler     return;
140316a497c6SRafael Auler   }
140416a497c6SRafael Auler   Ctx->CallFlowTable->get(TargetDesc->Address).Calls += Entry.Val;
140516a497c6SRafael Auler   char LineBuf[BufSize];
140616a497c6SRafael Auler   char *Ptr = LineBuf;
140716a497c6SRafael Auler   Ptr = serializeLoc(*Ctx, Ptr, *CallsiteDesc, BufSize);
140816a497c6SRafael Auler   Ptr = serializeLoc(*Ctx, Ptr, TargetDesc->Loc, BufSize - (Ptr - LineBuf));
1409cc4b2fb6SRafael Auler   Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
141016a497c6SRafael Auler   Ptr = intToStr(Ptr, Entry.Val, 10);
141162aa74f8SRafael Auler   *Ptr++ = '\n';
1412821480d2SRafael Auler   __write(FD, LineBuf, Ptr - LineBuf);
141362aa74f8SRafael Auler }
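// Output sketch (hypothetical counts): a callsite whose target resolves to a
// known function produces a regular call line ending in "0 <count>", while an
// unresolved target keeps the count but credits it to "[unknown]" instead of
// a real destination.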
1414cc4b2fb6SRafael Auler 
141516a497c6SRafael Auler /// Write to \p FD all of the indirect call profiles.
141616a497c6SRafael Auler void writeIndirectCallProfile(int FD, ProfileWriterContext &Ctx) {
141716a497c6SRafael Auler   for (int I = 0; I < __bolt_instr_num_ind_calls; ++I) {
141816a497c6SRafael Auler     DEBUG(reportNumber("IndCallsite #", I, 10));
141916a497c6SRafael Auler     GlobalIndCallCounters[I].forEachElement(visitIndCallCounter, FD, I, &Ctx);
142016a497c6SRafael Auler   }
142116a497c6SRafael Auler }
142216a497c6SRafael Auler 
142316a497c6SRafael Auler /// Check a single call flow for a callee versus all known callers. If there
142416a497c6SRafael Auler /// are fewer callers than the callee expects, write the difference to the
142516a497c6SRafael Auler /// profile with source [unknown].
142616a497c6SRafael Auler void visitCallFlowEntry(CallFlowHashTable::MapEntry &Entry, int FD,
142716a497c6SRafael Auler                         ProfileWriterContext *Ctx) {
142816a497c6SRafael Auler   DEBUG(reportNumber("Call flow entry address: 0x", Entry.Key, 16));
142916a497c6SRafael Auler   DEBUG(reportNumber("Calls: ", Entry.Calls, 10));
143016a497c6SRafael Auler   DEBUG(reportNumber("Reported entry frequency: ", Entry.Val, 10));
143116a497c6SRafael Auler   DEBUG({
143216a497c6SRafael Auler     if (Entry.Calls > Entry.Val)
143316a497c6SRafael Auler       report("  More calls than expected!\n");
143416a497c6SRafael Auler   });
143516a497c6SRafael Auler   if (Entry.Val <= Entry.Calls)
143616a497c6SRafael Auler     return;
143716a497c6SRafael Auler   DEBUG(reportNumber(
143816a497c6SRafael Auler       "  Balancing calls with traffic: ", Entry.Val - Entry.Calls, 10));
143916a497c6SRafael Auler   const IndCallTargetDescription *TargetDesc =
144016a497c6SRafael Auler       Ctx->lookupIndCallTarget(Entry.Key);
144116a497c6SRafael Auler   if (!TargetDesc) {
144216a497c6SRafael Auler     // There is probably something wrong with this callee and it should be
144316a497c6SRafael Auler     // investigated, but we don't want to assert and lose all the data collected.
144416a497c6SRafael Auler     DEBUG(report("WARNING: failed to look up call target!\n"));
144516a497c6SRafael Auler     return;
144616a497c6SRafael Auler   }
144716a497c6SRafael Auler   char LineBuf[BufSize];
144816a497c6SRafael Auler   char *Ptr = LineBuf;
144916a497c6SRafael Auler   Ptr = strCopy(Ptr, "0 [unknown] 0 ", BufSize);
145016a497c6SRafael Auler   Ptr = serializeLoc(*Ctx, Ptr, TargetDesc->Loc, BufSize - (Ptr - LineBuf));
145116a497c6SRafael Auler   Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
145216a497c6SRafael Auler   Ptr = intToStr(Ptr, Entry.Val - Entry.Calls, 10);
145316a497c6SRafael Auler   *Ptr++ = '\n';
145416a497c6SRafael Auler   __write(FD, LineBuf, Ptr - LineBuf);
145516a497c6SRafael Auler }
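// Worked example (hypothetical counts): if a callee entry point reports
// Entry.Val == 120 executions but the known callers only account for
// Entry.Calls == 100 calls, the remaining 20 are written with source
// "[unknown]" so that the total call flow into the callee adds up.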
145616a497c6SRafael Auler 
145716a497c6SRafael Auler /// Open the fdata file for writing and return a valid file descriptor,
145816a497c6SRafael Auler /// aborting the program upon failure.
145916a497c6SRafael Auler int openProfile() {
146016a497c6SRafael Auler   // Build the profile file name, appending our PID if requested
146116a497c6SRafael Auler   char Buf[BufSize];
146216a497c6SRafael Auler   char *Ptr = Buf;
146316a497c6SRafael Auler   uint64_t PID = __getpid();
146416a497c6SRafael Auler   Ptr = strCopy(Buf, __bolt_instr_filename, BufSize);
146516a497c6SRafael Auler   if (__bolt_instr_use_pid) {
146616a497c6SRafael Auler     Ptr = strCopy(Ptr, ".", BufSize - (Ptr - Buf + 1));
146716a497c6SRafael Auler     Ptr = intToStr(Ptr, PID, 10);
146816a497c6SRafael Auler     Ptr = strCopy(Ptr, ".fdata", BufSize - (Ptr - Buf + 1));
146916a497c6SRafael Auler   }
147016a497c6SRafael Auler   *Ptr++ = '\0';
147160bbddf3SDenis Revunov   uint64_t FD = __open(Buf, O_WRONLY | O_TRUNC | O_CREAT,
147216a497c6SRafael Auler                        /*mode=*/0666);
147316a497c6SRafael Auler   if (static_cast<int64_t>(FD) < 0) {
147416a497c6SRafael Auler     report("Error while trying to open profile file for writing: ");
147516a497c6SRafael Auler     report(Buf);
147616a497c6SRafael Auler     reportNumber("\nFailed with error number: 0x",
147716a497c6SRafael Auler                  0 - static_cast<int64_t>(FD), 16);
147816a497c6SRafael Auler     __exit(1);
147916a497c6SRafael Auler   }
148016a497c6SRafael Auler   return FD;
148116a497c6SRafael Auler }
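// Example (hypothetical values): with __bolt_instr_filename == "prof" and
// __bolt_instr_use_pid enabled, a process with PID 1234 opens
// "prof.1234.fdata"; with the PID option disabled the name is used verbatim.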
1482a0dd5b05SAlexander Shaposhnikov 
1483a0dd5b05SAlexander Shaposhnikov #endif
1484a0dd5b05SAlexander Shaposhnikov 
148516a497c6SRafael Auler } // anonymous namespace
148616a497c6SRafael Auler 
1487a0dd5b05SAlexander Shaposhnikov #if !defined(__APPLE__)
1488a0dd5b05SAlexander Shaposhnikov 
148916a497c6SRafael Auler /// Reset all counters in case you want to start profiling a new phase of your
149016a497c6SRafael Auler /// program independently of prior phases.
149116a497c6SRafael Auler /// The address of this function is printed by BOLT, and it can be called by
149216a497c6SRafael Auler /// any attached debugger at runtime. There is a useful one-liner for gdb:
149316a497c6SRafael Auler ///
149416a497c6SRafael Auler ///   gdb -p $(pgrep -xo PROCESSNAME) -ex 'p ((void(*)())0xdeadbeef)()' \
149516a497c6SRafael Auler ///     -ex 'set confirm off' -ex quit
149616a497c6SRafael Auler ///
149716a497c6SRafael Auler /// where 0xdeadbeef is this function's address and PROCESSNAME is your
149816a497c6SRafael Auler /// binary's file name.
149916a497c6SRafael Auler extern "C" void __bolt_instr_clear_counters() {
1500ea2182feSMaksim Panchenko   memset(reinterpret_cast<char *>(__bolt_instr_locations), 0,
150116a497c6SRafael Auler          __bolt_num_counters * 8);
1502883bf0e8SAmir Ayupov   for (int I = 0; I < __bolt_instr_num_ind_calls; ++I)
150316a497c6SRafael Auler     GlobalIndCallCounters[I].resetCounters();
150416a497c6SRafael Auler }
150516a497c6SRafael Auler 
150616a497c6SRafael Auler /// This is the entry point for profile writing.
150716a497c6SRafael Auler /// There are three ways of getting here:
150816a497c6SRafael Auler ///
150916a497c6SRafael Auler ///  * Program execution ended, finalization methods are running and BOLT
151016a497c6SRafael Auler ///    hooked into FINI from your binary dynamic section;
151116a497c6SRafael Auler ///  * You used the sleep timer option and during initialization we forked
151216a497c6SRafael Auler ///    a separate process that will call this function periodically;
151316a497c6SRafael Auler ///  * BOLT prints this function address so you can attach a debugger and
151416a497c6SRafael Auler ///    call this function directly to get your profile written to disk
151516a497c6SRafael Auler ///    on demand.
151616a497c6SRafael Auler ///
1517ad79d517SVasily Leonenko extern "C" void __attribute((force_align_arg_pointer))
1518*a7992981SDenis Revunov __bolt_instr_data_dump(int FD) {
151916a497c6SRafael Auler   // Already dumping
152016a497c6SRafael Auler   if (!GlobalWriteProfileMutex->acquire())
152116a497c6SRafael Auler     return;
152216a497c6SRafael Auler 
1523*a7992981SDenis Revunov   int ret = __lseek(FD, 0, SEEK_SET);
1524*a7992981SDenis Revunov   assert(ret == 0, "Failed to lseek!");
1525*a7992981SDenis Revunov   ret = __ftruncate(FD, 0);
1526*a7992981SDenis Revunov   assert(ret == 0, "Failed to ftruncate!");
152716a497c6SRafael Auler   BumpPtrAllocator HashAlloc;
152816a497c6SRafael Auler   HashAlloc.setMaxSize(0x6400000);
152916a497c6SRafael Auler   ProfileWriterContext Ctx = readDescriptions();
153016a497c6SRafael Auler   Ctx.CallFlowTable = new (HashAlloc, 0) CallFlowHashTable(HashAlloc);
153116a497c6SRafael Auler 
153216a497c6SRafael Auler   DEBUG(printStats(Ctx));
153316a497c6SRafael Auler 
1534cc4b2fb6SRafael Auler   BumpPtrAllocator Alloc;
1535eaf1b566SJakub Beránek   Alloc.setMaxSize(0x6400000);
153616a497c6SRafael Auler   const uint8_t *FuncDesc = Ctx.FuncDescriptions;
1537cc4b2fb6SRafael Auler   for (int I = 0, E = __bolt_instr_num_funcs; I < E; ++I) {
153816a497c6SRafael Auler     FuncDesc = writeFunctionProfile(FD, Ctx, FuncDesc, Alloc);
153916a497c6SRafael Auler     Alloc.clear();
1540cc4b2fb6SRafael Auler     DEBUG(reportNumber("FuncDesc now: ", (uint64_t)FuncDesc, 16));
1541cc4b2fb6SRafael Auler   }
154216a497c6SRafael Auler   assert(FuncDesc == (void *)Ctx.Strings,
1543cc4b2fb6SRafael Auler          "FuncDesc ptr must be equal to stringtable");
1544cc4b2fb6SRafael Auler 
154516a497c6SRafael Auler   writeIndirectCallProfile(FD, Ctx);
154616a497c6SRafael Auler   Ctx.CallFlowTable->forEachElement(visitCallFlowEntry, FD, &Ctx);
154716a497c6SRafael Auler 
1548dcdd37fdSVladislav Khmelevsky   __fsync(FD);
154916a497c6SRafael Auler   __munmap(Ctx.MMapPtr, Ctx.MMapSize);
155016a497c6SRafael Auler   __close(Ctx.FileDesc);
155116a497c6SRafael Auler   HashAlloc.destroy();
155216a497c6SRafael Auler   GlobalWriteProfileMutex->release();
155316a497c6SRafael Auler   DEBUG(report("Finished writing profile.\n"));
155416a497c6SRafael Auler }
155516a497c6SRafael Auler 
155616a497c6SRafael Auler /// Event loop for our child process spawned during setup to dump profile data
155716a497c6SRafael Auler /// at user-specified intervals.
155816a497c6SRafael Auler void watchProcess() {
155916a497c6SRafael Auler   timespec ts, rem;
156016a497c6SRafael Auler   uint64_t Elapsed = 0ull;
1561*a7992981SDenis Revunov   int FD = openProfile();
156276d346caSVladislav Khmelevsky   uint64_t ppid;
156376d346caSVladislav Khmelevsky   if (__bolt_instr_wait_forks) {
156476d346caSVladislav Khmelevsky     // Store the negated parent pgid; a negative id makes kill() probe the whole group
156576d346caSVladislav Khmelevsky     ppid = -__getpgid(0);
156676d346caSVladislav Khmelevsky     // And leave parent process group
156776d346caSVladislav Khmelevsky     __setpgid(0, 0);
156876d346caSVladislav Khmelevsky   } else {
156976d346caSVladislav Khmelevsky     // Store parent pid
157076d346caSVladislav Khmelevsky     ppid = __getppid();
157176d346caSVladislav Khmelevsky     if (ppid == 1) {
157276d346caSVladislav Khmelevsky       // Parent already dead
1573*a7992981SDenis Revunov       __bolt_instr_data_dump(FD);
157476d346caSVladislav Khmelevsky       goto out;
157576d346caSVladislav Khmelevsky     }
157676d346caSVladislav Khmelevsky   }
157776d346caSVladislav Khmelevsky 
157816a497c6SRafael Auler   ts.tv_sec = 1;
157916a497c6SRafael Auler   ts.tv_nsec = 0;
158016a497c6SRafael Auler   while (1) {
158116a497c6SRafael Auler     __nanosleep(&ts, &rem);
158276d346caSVladislav Khmelevsky     // If the probe fails, our parent process or all of its forks are dead,
158376d346caSVladislav Khmelevsky     // so there is no need for us to keep dumping.
158476d346caSVladislav Khmelevsky     if (__kill(ppid, 0) < 0) {
158576d346caSVladislav Khmelevsky       if (__bolt_instr_no_counters_clear)
1586*a7992981SDenis Revunov         __bolt_instr_data_dump(FD);
158716a497c6SRafael Auler       break;
158816a497c6SRafael Auler     }
158976d346caSVladislav Khmelevsky 
159016a497c6SRafael Auler     if (++Elapsed < __bolt_instr_sleep_time)
159116a497c6SRafael Auler       continue;
159276d346caSVladislav Khmelevsky 
159316a497c6SRafael Auler     Elapsed = 0;
1594*a7992981SDenis Revunov     __bolt_instr_data_dump(FD);
159576d346caSVladislav Khmelevsky     if (__bolt_instr_no_counters_clear == false)
159616a497c6SRafael Auler       __bolt_instr_clear_counters();
159716a497c6SRafael Auler   }
159876d346caSVladislav Khmelevsky 
159976d346caSVladislav Khmelevsky out:;
160016a497c6SRafael Auler   DEBUG(report("My parent process is dead, bye!\n"));
1601*a7992981SDenis Revunov   __close(FD);
160216a497c6SRafael Auler   __exit(0);
160316a497c6SRafael Auler }
160416a497c6SRafael Auler 
160516a497c6SRafael Auler extern "C" void __bolt_instr_indirect_call();
160616a497c6SRafael Auler extern "C" void __bolt_instr_indirect_tailcall();
160716a497c6SRafael Auler 
160816a497c6SRafael Auler /// Initialization code
1609ad79d517SVasily Leonenko extern "C" void __attribute((force_align_arg_pointer)) __bolt_instr_setup() {
161058a16d84SAmir Ayupov   __bolt_ind_call_counter_func_pointer = __bolt_instr_indirect_call;
161158a16d84SAmir Ayupov   __bolt_ind_tailcall_counter_func_pointer = __bolt_instr_indirect_tailcall;
161258a16d84SAmir Ayupov 
161316a497c6SRafael Auler   const uint64_t CountersStart =
161416a497c6SRafael Auler       reinterpret_cast<uint64_t>(&__bolt_instr_locations[0]);
161516a497c6SRafael Auler   const uint64_t CountersEnd = alignTo(
161616a497c6SRafael Auler       reinterpret_cast<uint64_t>(&__bolt_instr_locations[__bolt_num_counters]),
161716a497c6SRafael Auler       0x1000);
161816a497c6SRafael Auler   DEBUG(reportNumber("replace mmap start: ", CountersStart, 16));
161916a497c6SRafael Auler   DEBUG(reportNumber("replace mmap stop: ", CountersEnd, 16));
162016a497c6SRafael Auler   assert(CountersEnd > CountersStart, "no counters");
162158a16d84SAmir Ayupov 
162258a16d84SAmir Ayupov   const bool Shared = !__bolt_instr_use_pid;
162358a16d84SAmir Ayupov   const uint64_t MapPrivateOrShared = Shared ? MAP_SHARED : MAP_PRIVATE;
162458a16d84SAmir Ayupov 
16258b23a853SDenis Revunov   void *Ret =
16268b23a853SDenis Revunov       __mmap(CountersStart, CountersEnd - CountersStart, PROT_READ | PROT_WRITE,
162758a16d84SAmir Ayupov              MAP_ANONYMOUS | MapPrivateOrShared | MAP_FIXED, -1, 0);
16288ed172cfSDenis Revunov   assert(Ret != MAP_FAILED, "__bolt_instr_setup: Failed to mmap counters!");
162958a16d84SAmir Ayupov 
16300cc19b56SDenis Revunov   GlobalMetadataStorage = __mmap(0, 4096, PROT_READ | PROT_WRITE,
16310cc19b56SDenis Revunov                                  MapPrivateOrShared | MAP_ANONYMOUS, -1, 0);
16320cc19b56SDenis Revunov   assert(GlobalMetadataStorage != MAP_FAILED,
16330cc19b56SDenis Revunov          "__bolt_instr_setup: failed to mmap page for metadata!");
16340cc19b56SDenis Revunov 
16350cc19b56SDenis Revunov   GlobalAlloc = new (GlobalMetadataStorage) BumpPtrAllocator;
16360cc19b56SDenis Revunov   // Conservatively reserve 100MiB
16370cc19b56SDenis Revunov   GlobalAlloc->setMaxSize(0x6400000);
16380cc19b56SDenis Revunov   GlobalAlloc->setShared(Shared);
16390cc19b56SDenis Revunov   GlobalWriteProfileMutex = new (*GlobalAlloc, 0) Mutex();
164016a497c6SRafael Auler   if (__bolt_instr_num_ind_calls > 0)
164116a497c6SRafael Auler     GlobalIndCallCounters =
16420cc19b56SDenis Revunov         new (*GlobalAlloc, 0) IndirectCallHashTable[__bolt_instr_num_ind_calls];
164316a497c6SRafael Auler 
164416a497c6SRafael Auler   if (__bolt_instr_sleep_time != 0) {
164576d346caSVladislav Khmelevsky     // Move the instrumented process into its own process group
164676d346caSVladislav Khmelevsky     if (__bolt_instr_wait_forks)
164776d346caSVladislav Khmelevsky       __setpgid(0, 0);
164876d346caSVladislav Khmelevsky 
1649c7306cc2SAmir Ayupov     if (long PID = __fork())
165016a497c6SRafael Auler       return;
165116a497c6SRafael Auler     watchProcess();
165216a497c6SRafael Auler   }
165316a497c6SRafael Auler }
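// Sizing sketch (hypothetical values): with __bolt_num_counters == 1000 the
// counter array spans 8000 bytes, so the remapped region extends to the next
// 4 KiB boundary past its end. When __bolt_instr_use_pid is off the mapping
// is MAP_SHARED, so the watcher child forked above observes the parent's live
// counter updates.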
165416a497c6SRafael Auler 
1655361f3b55SVladislav Khmelevsky extern "C" __attribute((force_align_arg_pointer)) void
1656361f3b55SVladislav Khmelevsky instrumentIndirectCall(uint64_t Target, uint64_t IndCallID) {
16570cc19b56SDenis Revunov   GlobalIndCallCounters[IndCallID].incrementVal(Target, *GlobalAlloc);
165816a497c6SRafael Auler }
165916a497c6SRafael Auler 
166016a497c6SRafael Auler /// We receive, as arguments on the stack, the identifier of the indirect call
166116a497c6SRafael Auler /// site as well as the target address of the call.
166216a497c6SRafael Auler extern "C" __attribute((naked)) void __bolt_instr_indirect_call()
166316a497c6SRafael Auler {
166416a497c6SRafael Auler   __asm__ __volatile__(SAVE_ALL
1665361f3b55SVladislav Khmelevsky                        "mov 0xa0(%%rsp), %%rdi\n"
1666361f3b55SVladislav Khmelevsky                        "mov 0x98(%%rsp), %%rsi\n"
166716a497c6SRafael Auler                        "call instrumentIndirectCall\n"
166816a497c6SRafael Auler                        RESTORE_ALL
1669361f3b55SVladislav Khmelevsky                        "ret\n"
167016a497c6SRafael Auler                        :::);
167116a497c6SRafael Auler }
167216a497c6SRafael Auler 
167316a497c6SRafael Auler extern "C" __attribute((naked)) void __bolt_instr_indirect_tailcall()
167416a497c6SRafael Auler {
167516a497c6SRafael Auler   __asm__ __volatile__(SAVE_ALL
1676361f3b55SVladislav Khmelevsky                        "mov 0x98(%%rsp), %%rdi\n"
1677361f3b55SVladislav Khmelevsky                        "mov 0x90(%%rsp), %%rsi\n"
167816a497c6SRafael Auler                        "call instrumentIndirectCall\n"
167916a497c6SRafael Auler                        RESTORE_ALL
1680361f3b55SVladislav Khmelevsky                        "ret\n"
168116a497c6SRafael Auler                        :::);
168216a497c6SRafael Auler }
168316a497c6SRafael Auler 
168416a497c6SRafael Auler /// This hooks the ELF entry point; it needs to save all machine state.
168516a497c6SRafael Auler extern "C" __attribute((naked)) void __bolt_instr_start()
168616a497c6SRafael Auler {
168716a497c6SRafael Auler   __asm__ __volatile__(SAVE_ALL
168816a497c6SRafael Auler                        "call __bolt_instr_setup\n"
168916a497c6SRafael Auler                        RESTORE_ALL
1690ad79d517SVasily Leonenko                        "jmp __bolt_start_trampoline\n"
169116a497c6SRafael Auler                        :::);
169216a497c6SRafael Auler }
169316a497c6SRafael Auler 
169416a497c6SRafael Auler /// This hooks into ELF's DT_FINI.
169516a497c6SRafael Auler extern "C" void __bolt_instr_fini() {
1696553f28e9SVladislav Khmelevsky   __bolt_fini_trampoline();
1697*a7992981SDenis Revunov   if (__bolt_instr_sleep_time == 0) {
1698*a7992981SDenis Revunov     int FD = openProfile();
1699*a7992981SDenis Revunov     __bolt_instr_data_dump(FD);
1700*a7992981SDenis Revunov     __close(FD);
1701*a7992981SDenis Revunov   }
170216a497c6SRafael Auler   DEBUG(report("Finished.\n"));
170362aa74f8SRafael Auler }
1704bbd9d610SAlexander Shaposhnikov 
17053b876cc3SAlexander Shaposhnikov #endif
17063b876cc3SAlexander Shaposhnikov 
17073b876cc3SAlexander Shaposhnikov #if defined(__APPLE__)
1708bbd9d610SAlexander Shaposhnikov 
1709a0dd5b05SAlexander Shaposhnikov extern "C" void __bolt_instr_data_dump() {
1710a0dd5b05SAlexander Shaposhnikov   ProfileWriterContext Ctx = readDescriptions();
1711a0dd5b05SAlexander Shaposhnikov 
1712a0dd5b05SAlexander Shaposhnikov   int FD = 2;
1713a0dd5b05SAlexander Shaposhnikov   BumpPtrAllocator Alloc;
1714a0dd5b05SAlexander Shaposhnikov   const uint8_t *FuncDesc = Ctx.FuncDescriptions;
1715a0dd5b05SAlexander Shaposhnikov   uint32_t bolt_instr_num_funcs = _bolt_instr_num_funcs_getter();
1716a0dd5b05SAlexander Shaposhnikov 
1717a0dd5b05SAlexander Shaposhnikov   for (int I = 0, E = bolt_instr_num_funcs; I < E; ++I) {
1718a0dd5b05SAlexander Shaposhnikov     FuncDesc = writeFunctionProfile(FD, Ctx, FuncDesc, Alloc);
1719a0dd5b05SAlexander Shaposhnikov     Alloc.clear();
1720a0dd5b05SAlexander Shaposhnikov     DEBUG(reportNumber("FuncDesc now: ", (uint64_t)FuncDesc, 16));
1721a0dd5b05SAlexander Shaposhnikov   }
1722a0dd5b05SAlexander Shaposhnikov   assert(FuncDesc == (void *)Ctx.Strings,
1723a0dd5b05SAlexander Shaposhnikov          "FuncDesc ptr must be equal to stringtable");
1724a0dd5b05SAlexander Shaposhnikov }
1725a0dd5b05SAlexander Shaposhnikov 
1726bbd9d610SAlexander Shaposhnikov // On OSX/iOS the final symbol name of an extern "C" function/variable contains
1727bbd9d610SAlexander Shaposhnikov // one extra leading underscore: _bolt_instr_setup -> __bolt_instr_setup.
17283b876cc3SAlexander Shaposhnikov extern "C"
17293b876cc3SAlexander Shaposhnikov __attribute__((section("__TEXT,__setup")))
17303b876cc3SAlexander Shaposhnikov __attribute__((force_align_arg_pointer))
17313b876cc3SAlexander Shaposhnikov void _bolt_instr_setup() {
1732a0dd5b05SAlexander Shaposhnikov   __asm__ __volatile__(SAVE_ALL :::);
17333b876cc3SAlexander Shaposhnikov 
1734a0dd5b05SAlexander Shaposhnikov   report("Hello!\n");
17353b876cc3SAlexander Shaposhnikov 
1736a0dd5b05SAlexander Shaposhnikov   __asm__ __volatile__(RESTORE_ALL :::);
17371cf23e5eSAlexander Shaposhnikov }
1738bbd9d610SAlexander Shaposhnikov 
17393b876cc3SAlexander Shaposhnikov extern "C"
17403b876cc3SAlexander Shaposhnikov __attribute__((section("__TEXT,__fini")))
17413b876cc3SAlexander Shaposhnikov __attribute__((force_align_arg_pointer))
17423b876cc3SAlexander Shaposhnikov void _bolt_instr_fini() {
1743a0dd5b05SAlexander Shaposhnikov   report("Bye!\n");
1744a0dd5b05SAlexander Shaposhnikov   __bolt_instr_data_dump();
1745e067f2adSAlexander Shaposhnikov }
1746e067f2adSAlexander Shaposhnikov 
1747bbd9d610SAlexander Shaposhnikov #endif
1748cb8d701bSVladislav Khmelevsky #endif
1749