//===- bolt/runtime/instr.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// BOLT runtime instrumentation library for x86 Linux. Currently, BOLT does
// not support linking modules with dependencies on one another into the final
// binary (TODO?), which means this library has to be self-contained in a single
// module.
//
// All extern declarations here need to be defined by BOLT itself. Those will be
// undefined symbols that BOLT needs to resolve by emitting these symbols with
// MCStreamer. Currently, Passes/Instrumentation.cpp is the pass responsible
// for defining the symbols here and these two files have a tight coupling: one
// works statically when you run BOLT and the other at program runtime when
// you run an instrumented binary. The main goal here is to output an fdata file
// (BOLT profile) with the instrumentation counters inserted by the static pass.
// Counters for indirect calls are an exception, as we can't know them
// statically. These counters are created and managed here. To allow this, we
// need a minimal framework for allocating memory dynamically. We provide this
// with the BumpPtrAllocator class (not LLVM's, but our own version of it).
//
// Since this code is intended to be inserted into any executable, we decided to
// make it standalone and not depend on any external libraries (e.g. language
// support libraries, such as glibc or stdc++). To allow this, we provide a few
// light implementations of common OS-interacting functionality using direct
// syscall wrappers. Our simple allocator doesn't manage deallocations that
// fragment the memory space, so it's stack based. This is the minimal framework
// provided here to allow processing instrumented counters and writing fdata.
//
// In the C++ idiom used here, we never use or rely on constructors or
// destructors for global objects. That's because those need support from the
// linker in initialization/finalization code, and we want to keep our linker
// very simple. Similarly, we don't create any global objects that are zero
// initialized, since those would need to go to .bss, which our simple linker
// also doesn't support (TODO?).
//
//===----------------------------------------------------------------------===//

#include "common.h"

// Enables very verbose logging to stderr, useful when debugging
//#define ENABLE_DEBUG

#ifdef ENABLE_DEBUG
#define DEBUG(X)                                                               \
  { X; }
#else
#define DEBUG(X)                                                               \
  {}
#endif
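
// Example use: wrap debug-only statements so they compile away unless
// ENABLE_DEBUG is defined above, e.g.
//   DEBUG(reportNumber("NumEdges = ", NumEdges, 10));
// report/reportNumber come from common.h and are used this way throughout
// this file.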

#pragma GCC visibility push(hidden)

extern "C" {

#if defined(__APPLE__)
extern uint64_t* _bolt_instr_locations_getter();
extern uint32_t _bolt_num_counters_getter();

extern uint8_t* _bolt_instr_tables_getter();
extern uint32_t _bolt_instr_num_funcs_getter();

#else

// Main counters inserted by instrumentation, incremented during runtime when
// points of interest (locations) in the program are reached. Those are direct
// calls and direct and indirect branches (local ones). There are also counters
// for basic block execution if they are a spanning tree leaf and need to be
// counted in order to infer the execution count of other edges of the CFG.
extern uint64_t __bolt_instr_locations[];
extern uint32_t __bolt_num_counters;
// Descriptions are serialized metadata about binary functions written by BOLT,
// so we have a minimal understanding about the program structure. For a
// reference on the exact format of this metadata, see *Description structs,
// Location, InstrumentedNode and EntryNode.
// Number of indirect call site descriptions
extern uint32_t __bolt_instr_num_ind_calls;
// Number of indirect call target descriptions
extern uint32_t __bolt_instr_num_ind_targets;
// Number of function descriptions
extern uint32_t __bolt_instr_num_funcs;
// Time to sleep across dumps (when we write the fdata profile to disk)
extern uint32_t __bolt_instr_sleep_time;
// Do not clear counters across dumps; rewrite the file with the updated values
extern bool __bolt_instr_no_counters_clear;
// Wait until all forks of the instrumented process finish
extern bool __bolt_instr_wait_forks;
// Filename to dump data to
extern char __bolt_instr_filename[];
// Instrumented binary file path
extern char __bolt_instr_binpath[];
// If true, append current PID to the fdata filename when creating it so
// different invocations of the same program can be differentiated.
extern bool __bolt_instr_use_pid;
// Functions that will be used to instrument indirect calls. The BOLT static
// pass will identify indirect calls and modify them to load the address in
// these trampolines and call this address instead. BOLT can't use direct calls
// to our handlers because our addresses here are not known at analysis time.
// We only support resolving dependencies from this file to the output of BOLT,
// *not* the other way around.
// TODO: We need better linking support to make that happen.
extern void (*__bolt_ind_call_counter_func_pointer)();
extern void (*__bolt_ind_tailcall_counter_func_pointer)();
// Function pointers to init/fini trampoline routines in the binary, so we can
// resume regular execution of these functions that we hooked
extern void __bolt_start_trampoline();
extern void __bolt_fini_trampoline();

#endif
}

namespace {

/// A simple allocator that mmaps a fixed size region and manages this space
/// in a stack fashion, meaning you always deallocate the last element that
/// was allocated. In practice, we don't need to deallocate individual elements.
/// We monotonically increase our usage and then deallocate everything once we
/// are done processing something.
class BumpPtrAllocator {
  /// This is written before each allocation and acts as a canary to detect
  /// when a bug caused our program to cross allocation boundaries.
  struct EntryMetadata {
    uint64_t Magic;
    uint64_t AllocSize;
  };

public:
  void *allocate(size_t Size) {
    Lock L(M);

    if (StackBase == nullptr) {
      StackBase = reinterpret_cast<uint8_t *>(
          __mmap(0, MaxSize, PROT_READ | PROT_WRITE,
                 (Shared ? MAP_SHARED : MAP_PRIVATE) | MAP_ANONYMOUS, -1, 0));
      assert(StackBase != MAP_FAILED,
             "BumpPtrAllocator: failed to mmap stack!");
      StackSize = 0;
    }

    Size = alignTo(Size + sizeof(EntryMetadata), 16);
    uint8_t *AllocAddress = StackBase + StackSize + sizeof(EntryMetadata);
    auto *M = reinterpret_cast<EntryMetadata *>(StackBase + StackSize);
    M->Magic = Magic;
    M->AllocSize = Size;
    StackSize += Size;
    assert(StackSize < MaxSize, "allocator ran out of memory");
    return AllocAddress;
  }

#ifdef DEBUG
  /// Element-wise deallocation is only used for debugging to catch memory
  /// bugs by checking magic bytes. Ordinarily, we reset the allocator once
  /// we are done with it. Reset is done with clear(). There's no need
  /// to deallocate each element individually.
  void deallocate(void *Ptr) {
    Lock L(M);
    uint8_t MetadataOffset = sizeof(EntryMetadata);
    auto *M = reinterpret_cast<EntryMetadata *>(
        reinterpret_cast<uint8_t *>(Ptr) - MetadataOffset);
    const uint8_t *StackTop = StackBase + StackSize + MetadataOffset;
    // Validate size
    if (Ptr != StackTop - M->AllocSize) {
      // Failed validation; check if it is a pointer returned by operator new[]
      MetadataOffset +=
          sizeof(uint64_t); // Space for number of elements alloc'ed
      M = reinterpret_cast<EntryMetadata *>(reinterpret_cast<uint8_t *>(Ptr) -
                                            MetadataOffset);
      // If this assertion fires, it failed both checks. Stop the program, we
      // have a memory bug.
      assert(Ptr == StackTop - M->AllocSize,
             "must deallocate the last element alloc'ed");
    }
    assert(M->Magic == Magic, "allocator magic is corrupt");
    StackSize -= M->AllocSize;
  }
#else
  void deallocate(void *) {}
#endif

  void clear() {
    Lock L(M);
    StackSize = 0;
  }

  /// Set mmap reservation size (only relevant before the first allocation)
  void setMaxSize(uint64_t Size) { MaxSize = Size; }

  /// Set mmap reservation privacy (only relevant before the first allocation)
  void setShared(bool S) { Shared = S; }

  void destroy() {
    if (StackBase == nullptr)
      return;
    __munmap(StackBase, MaxSize);
  }

  // Placement operator to construct the allocator in possibly shared mmaped
  // memory
  static void *operator new(size_t, void *Ptr) { return Ptr; };

private:
  static constexpr uint64_t Magic = 0x1122334455667788ull;
  uint64_t MaxSize = 0xa00000;
  uint8_t *StackBase{nullptr};
  uint64_t StackSize{0};
  bool Shared{false};
  Mutex M;
};
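
// Typical use (a sketch; names illustrative): configure the reservation
// before the first allocation, allocate through the placement new operators
// defined below, and release everything at once when done:
//   BumpPtrAllocator Alloc;
//   Alloc.setMaxSize(0x1000000); // optional, default reservation is 0xa00000
//   auto *Freqs = new (Alloc) uint64_t[NumCounters];
//   ...
//   Alloc.clear();               // drops all allocations in one shot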

/// Used for allocating indirect call instrumentation counters. Initialized by
/// __bolt_instr_setup, our initialization routine.
BumpPtrAllocator *GlobalAlloc;

// Base address which we subtract from recorded PC values when searching for
// indirect call description entries. Needed because indCall descriptions are
// mapped read-only and contain static addresses. Initialized in
// __bolt_instr_setup.
uint64_t TextBaseAddress = 0;

// Storage for GlobalAlloc which can be shared if not using
// instrumentation-file-append-pid.
void *GlobalMetadataStorage;

} // anonymous namespace

// User-defined placement new operators. We only use those (as opposed to
// overriding the regular operator new) so we can keep our allocator in the
// stack instead of in a data section (global).
void *operator new(size_t Sz, BumpPtrAllocator &A) { return A.allocate(Sz); }
void *operator new(size_t Sz, BumpPtrAllocator &A, char C) {
  auto *Ptr = reinterpret_cast<char *>(A.allocate(Sz));
  memset(Ptr, C, Sz);
  return Ptr;
}
void *operator new[](size_t Sz, BumpPtrAllocator &A) {
  return A.allocate(Sz);
}
void *operator new[](size_t Sz, BumpPtrAllocator &A, char C) {
  auto *Ptr = reinterpret_cast<char *>(A.allocate(Sz));
  memset(Ptr, C, Sz);
  return Ptr;
}
// Only called during exception unwinding (useless). We must manually dealloc.
// C++ language weirdness
void operator delete(void *Ptr, BumpPtrAllocator &A) { A.deallocate(Ptr); }
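
// How these get used later in this file: arrays are typically requested
// zero-filled via the char-fill variant, e.g.
//   TableRoot = new (Alloc, 0) MapEntry[InitialSize];
// while the allocator object itself is constructed into pre-mmapped (possibly
// shared) storage through the trivial placement operator declared inside the
// class, roughly
//   GlobalAlloc = new (GlobalMetadataStorage) BumpPtrAllocator;
// (a sketch of what the setup routine does).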

namespace {

// Disable instrumentation optimizations that sacrifice profile accuracy
extern "C" bool __bolt_instr_conservative;

/// Basic key-val atom stored in our hash
struct SimpleHashTableEntryBase {
  uint64_t Key;
  uint64_t Val;
  void dump(const char *Msg = nullptr) {
    // TODO: make some sort of formatting function
    // Currently we have to do it the ugly way because
    // we want every message to be printed atomically via a single call to
    // __write. If we use reportNumber() and others multiple times, we'll get
    // garbage in a multithreaded environment
    char Buf[BufSize];
    char *Ptr = Buf;
    Ptr = intToStr(Ptr, __getpid(), 10);
    *Ptr++ = ':';
    *Ptr++ = ' ';
    if (Msg)
      Ptr = strCopy(Ptr, Msg, strLen(Msg));
    *Ptr++ = '0';
    *Ptr++ = 'x';
    Ptr = intToStr(Ptr, (uint64_t)this, 16);
    *Ptr++ = ':';
    *Ptr++ = ' ';
    Ptr = strCopy(Ptr, "MapEntry(0x", sizeof("MapEntry(0x") - 1);
    Ptr = intToStr(Ptr, Key, 16);
    *Ptr++ = ',';
    *Ptr++ = ' ';
    *Ptr++ = '0';
    *Ptr++ = 'x';
    Ptr = intToStr(Ptr, Val, 16);
    *Ptr++ = ')';
    *Ptr++ = '\n';
    assert(Ptr - Buf < BufSize, "Buffer overflow!");
    // print everything all at once for atomicity
    __write(2, Buf, Ptr - Buf);
  }
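
  // A line written by dump() has the form
  //   <pid>: <Msg>0x<entry address>: MapEntry(0x<Key>, 0x<Val>)
  // e.g. "1234: Hit: 0x7ffd2f10: MapEntry(0x401050, 0x2)" (values
  // illustrative).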
};

/// This hash table implementation starts by allocating a table of size
/// InitialSize. When conflicts happen in this main table, it resolves
/// them by chaining a new table of size IncSize. It never reallocs as our
/// allocator doesn't support it. The key is intended to be function pointers.
/// There's no clever hash function (it's just x mod size, size being prime).
/// I never tuned the coefficients in the modular equation (TODO)
/// This is used for indirect calls (each call site has one of these, so it
/// should have a small footprint) and for tallying call counts globally for
/// each target to check if we missed the origin of some calls (this one is a
/// large instantiation of this template, since it is global for all call
/// sites)
template <typename T = SimpleHashTableEntryBase, uint32_t InitialSize = 7,
          uint32_t IncSize = 7>
class SimpleHashTable {
public:
  using MapEntry = T;

  /// Increment by 1 the value of \p Key. If it is not in this table, it will
  /// be added to the table and its value set to 1.
  void incrementVal(uint64_t Key, BumpPtrAllocator &Alloc) {
    if (!__bolt_instr_conservative) {
      TryLock L(M);
      if (!L.isLocked())
        return;
      auto &E = getOrAllocEntry(Key, Alloc);
      ++E.Val;
      return;
    }
    Lock L(M);
    auto &E = getOrAllocEntry(Key, Alloc);
    ++E.Val;
  }

  /// Basic member accessing interface. Here we pass the allocator explicitly
  /// to avoid storing a pointer to it as part of this table (remember there is
  /// one hash for each indirect call site, so we want to minimize our
  /// footprint).
  MapEntry &get(uint64_t Key, BumpPtrAllocator &Alloc) {
    if (!__bolt_instr_conservative) {
      TryLock L(M);
      if (!L.isLocked())
        return NoEntry;
      return getOrAllocEntry(Key, Alloc);
    }
    Lock L(M);
    return getOrAllocEntry(Key, Alloc);
  }

  /// Traverses all elements in the table
  template <typename... Args>
  void forEachElement(void (*Callback)(MapEntry &, Args...), Args... args) {
    Lock L(M);
    if (!TableRoot)
      return;
    return forEachElement(Callback, InitialSize, TableRoot, args...);
  }

  void resetCounters();

private:
  constexpr static uint64_t VacantMarker = 0;
  constexpr static uint64_t FollowUpTableMarker = 0x8000000000000000ull;

  MapEntry *TableRoot{nullptr};
  MapEntry NoEntry;
  Mutex M;

  template <typename... Args>
  void forEachElement(void (*Callback)(MapEntry &, Args...),
                      uint32_t NumEntries, MapEntry *Entries, Args... args) {
    for (uint32_t I = 0; I < NumEntries; ++I) {
      MapEntry &Entry = Entries[I];
      if (Entry.Key == VacantMarker)
        continue;
      if (Entry.Key & FollowUpTableMarker) {
        MapEntry *Next =
            reinterpret_cast<MapEntry *>(Entry.Key & ~FollowUpTableMarker);
        assert(Next != Entries, "Circular reference!");
        forEachElement(Callback, IncSize, Next, args...);
        continue;
      }
      Callback(Entry, args...);
    }
  }

  MapEntry &firstAllocation(uint64_t Key, BumpPtrAllocator &Alloc) {
    TableRoot = new (Alloc, 0) MapEntry[InitialSize];
    MapEntry &Entry = TableRoot[Key % InitialSize];
    Entry.Key = Key;
    // DEBUG(Entry.dump("Created root entry: "));
    return Entry;
  }

  MapEntry &getEntry(MapEntry *Entries, uint64_t Key, uint64_t Selector,
                     BumpPtrAllocator &Alloc, int CurLevel) {
    // DEBUG(reportNumber("getEntry called, level ", CurLevel, 10));
    const uint32_t NumEntries = CurLevel == 0 ? InitialSize : IncSize;
    uint64_t Remainder = Selector / NumEntries;
    Selector = Selector % NumEntries;
    MapEntry &Entry = Entries[Selector];

    // A hit
    if (Entry.Key == Key) {
      // DEBUG(Entry.dump("Hit: "));
      return Entry;
    }

    // Vacant - add new entry
    if (Entry.Key == VacantMarker) {
      Entry.Key = Key;
      // DEBUG(Entry.dump("Adding new entry: "));
      return Entry;
    }

    // Defer to the next level
    if (Entry.Key & FollowUpTableMarker) {
      return getEntry(
          reinterpret_cast<MapEntry *>(Entry.Key & ~FollowUpTableMarker),
          Key, Remainder, Alloc, CurLevel + 1);
    }

    // Conflict - create the next level
    // DEBUG(Entry.dump("Creating new level: "));

    MapEntry *NextLevelTbl = new (Alloc, 0) MapEntry[IncSize];
    // DEBUG(
    //     reportNumber("Newly allocated level: 0x", uint64_t(NextLevelTbl),
    //     16));
    uint64_t CurEntrySelector = Entry.Key / InitialSize;
    for (int I = 0; I < CurLevel; ++I)
      CurEntrySelector /= IncSize;
    CurEntrySelector = CurEntrySelector % IncSize;
    NextLevelTbl[CurEntrySelector] = Entry;
    Entry.Key = reinterpret_cast<uint64_t>(NextLevelTbl) | FollowUpTableMarker;
    assert((NextLevelTbl[CurEntrySelector].Key & ~FollowUpTableMarker) !=
               uint64_t(Entries),
           "circular reference created!\n");
    // DEBUG(NextLevelTbl[CurEntrySelector].dump("New level entry: "));
    // DEBUG(Entry.dump("Updated old entry: "));
    return getEntry(NextLevelTbl, Key, Remainder, Alloc, CurLevel + 1);
  }
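
  // The lookup scheme above: level 0 selects an entry by Key % InitialSize;
  // on a conflict the colliding entry moves into a freshly chained table and
  // each further level selects by the next digit of the key, i.e.
  // (Key / InitialSize) % IncSize, dividing by IncSize again per level. With
  // the defaults (7/7), key 23 maps to slot 2 of the root table and, if that
  // slot is chained, to slot (23 / 7) % 7 == 3 one level down.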

  MapEntry &getOrAllocEntry(uint64_t Key, BumpPtrAllocator &Alloc) {
    if (TableRoot) {
      MapEntry &E = getEntry(TableRoot, Key, Key, Alloc, 0);
      assert(!(E.Key & FollowUpTableMarker), "Invalid entry!");
      return E;
    }
    return firstAllocation(Key, Alloc);
  }
};

template <typename T> void resetIndCallCounter(T &Entry) {
  Entry.Val = 0;
}

template <typename T, uint32_t X, uint32_t Y>
void SimpleHashTable<T, X, Y>::resetCounters() {
  forEachElement(resetIndCallCounter);
}

/// Represents a hash table mapping a function target address to its counter.
using IndirectCallHashTable = SimpleHashTable<>;

/// Initialize with number 1 instead of 0 so we don't go into .bss. This is the
/// global array of all hash tables storing indirect call destinations observed
/// during runtime, one table per call site.
IndirectCallHashTable *GlobalIndCallCounters{
    reinterpret_cast<IndirectCallHashTable *>(1)};
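
// Usage sketch: the indirect-call instrumentation handlers defined later in
// this file look up the table belonging to the call site and bump the counter
// of the observed target, roughly
//   GlobalIndCallCounters[CallSiteID].incrementVal(Target, *GlobalAlloc);
// where CallSiteID and Target stand for the values BOLT's emitted stubs pass
// in.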

/// Don't allow reentrancy in the fdata writing phase - only one thread writes
/// it
Mutex *GlobalWriteProfileMutex{reinterpret_cast<Mutex *>(1)};

/// Store the number of calls in addition to the target address (Key) and the
/// frequency as perceived by the basic block counter (Val).
struct CallFlowEntryBase : public SimpleHashTableEntryBase {
  uint64_t Calls;
};

using CallFlowHashTableBase = SimpleHashTable<CallFlowEntryBase, 11939, 233>;

/// This is a large table indexing all possible call targets (indirect and
/// direct ones). The goal is to find mismatches between number of calls (for
/// those calls we were able to track) and the entry basic block counter of the
/// callee. In most cases, these two should be equal. If not, there are two
/// possible scenarios here:
///
///  * Entry BB has a higher frequency than all known calls to this function.
///    In this case, we have dynamic library code or any uninstrumented code
///    calling this function. We will write the profile for these untracked
///    calls as having source "0 [unknown] 0" in the fdata file.
///
///  * Number of known calls is higher than the frequency of the entry BB.
///    This only happens when there is no counter for the entry BB / callee
///    function is not simple (in BOLT terms). We don't do anything special
///    here and just ignore those (we still report all calls to the non-simple
///    function, though).
///
class CallFlowHashTable : public CallFlowHashTableBase {
public:
  CallFlowHashTable(BumpPtrAllocator &Alloc) : Alloc(Alloc) {}

  MapEntry &get(uint64_t Key) { return CallFlowHashTableBase::get(Key, Alloc); }

private:
  // Different than the hash table for indirect call targets, we do store the
  // allocator here since there is only one call flow hash and space overhead
  // is negligible.
  BumpPtrAllocator &Alloc;
};

///
/// Description metadata emitted by BOLT to describe the program - refer to
/// Passes/Instrumentation.cpp - Instrumentation::emitTablesAsELFNote()
///
struct Location {
  uint32_t FunctionName;
  uint32_t Offset;
};

struct CallDescription {
  Location From;
  uint32_t FromNode;
  Location To;
  uint32_t Counter;
  uint64_t TargetAddress;
};

using IndCallDescription = Location;

struct IndCallTargetDescription {
  Location Loc;
  uint64_t Address;
};

struct EdgeDescription {
  Location From;
  uint32_t FromNode;
  Location To;
  uint32_t ToNode;
  uint32_t Counter;
};

struct InstrumentedNode {
  uint32_t Node;
  uint32_t Counter;
};

struct EntryNode {
  uint64_t Node;
  uint64_t Address;
};

struct FunctionDescription {
  uint32_t NumLeafNodes;
  const InstrumentedNode *LeafNodes;
  uint32_t NumEdges;
  const EdgeDescription *Edges;
  uint32_t NumCalls;
  const CallDescription *Calls;
  uint32_t NumEntryNodes;
  const EntryNode *EntryNodes;

  /// Constructor will parse the serialized function metadata written by BOLT
  FunctionDescription(const uint8_t *FuncDesc);

  uint64_t getSize() const {
    return 16 + NumLeafNodes * sizeof(InstrumentedNode) +
           NumEdges * sizeof(EdgeDescription) +
           NumCalls * sizeof(CallDescription) +
           NumEntryNodes * sizeof(EntryNode);
  }
};

/// The context is created when the fdata profile needs to be written to disk
/// and we need to interpret our runtime counters. It contains pointers to the
/// mmaped binary (only the BOLT written metadata section). Deserialization
/// should be straightforward as most data is POD or an array of POD elements.
/// This metadata is used to reconstruct function CFGs.
struct ProfileWriterContext {
  IndCallDescription *IndCallDescriptions;
  IndCallTargetDescription *IndCallTargets;
  uint8_t *FuncDescriptions;
  char *Strings; // String table with function names used in this binary
  int FileDesc;  // File descriptor for the file on disk backing this
                 // information in memory via mmap
  void *MMapPtr; // The mmap ptr
  int MMapSize;  // The mmap size

  /// Hash table storing all possible call destinations to detect untracked
  /// calls and correctly report them as [unknown] in output fdata.
  CallFlowHashTable *CallFlowTable;

  /// Lookup the sorted indirect call target vector to fetch function name and
  /// offset for an arbitrary function pointer.
  const IndCallTargetDescription *lookupIndCallTarget(uint64_t Target) const;
};

/// Performs a string comparison and returns zero if Str1 matches Str2.
/// Compares at most Size characters.
int compareStr(const char *Str1, const char *Str2, int Size) {
  while (*Str1 == *Str2) {
    if (*Str1 == '\0' || --Size == 0)
      return 0;
    ++Str1;
    ++Str2;
  }
  return 1;
}
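
// For example, compareStr(SecName, ".bolt.instr.tables", 64) in
// readDescriptions() below returns 0 only when SecName names the BOLT note
// section.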

/// Output Location to the fdata file
char *serializeLoc(const ProfileWriterContext &Ctx, char *OutBuf,
                   const Location Loc, uint32_t BufSize) {
  // fdata location format: Type Name Offset
  // Type 1 - regular symbol
  OutBuf = strCopy(OutBuf, "1 ");
  const char *Str = Ctx.Strings + Loc.FunctionName;
  uint32_t Size = 25;
  while (*Str) {
    *OutBuf++ = *Str++;
    if (++Size >= BufSize)
      break;
  }
  assert(!*Str, "buffer overflow, function name too large");
  *OutBuf++ = ' ';
  OutBuf = intToStr(OutBuf, Loc.Offset, 16);
  *OutBuf++ = ' ';
  return OutBuf;
}
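
// A serialized location thus reads e.g. "1 main 12 " (offset in hex). Two of
// these back to back, followed by the mispredict and execution counts, form
// one branch record in the output fdata, roughly "1 main 12 1 foo 0 0 42".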

/// Read and deserialize a function description written by BOLT. \p FuncDesc
/// points at the beginning of the function metadata structure in the file.
/// See Instrumentation::emitTablesAsELFNote()
FunctionDescription::FunctionDescription(const uint8_t *FuncDesc) {
  NumLeafNodes = *reinterpret_cast<const uint32_t *>(FuncDesc);
  DEBUG(reportNumber("NumLeafNodes = ", NumLeafNodes, 10));
  LeafNodes = reinterpret_cast<const InstrumentedNode *>(FuncDesc + 4);

  NumEdges = *reinterpret_cast<const uint32_t *>(
      FuncDesc + 4 + NumLeafNodes * sizeof(InstrumentedNode));
  DEBUG(reportNumber("NumEdges = ", NumEdges, 10));
  Edges = reinterpret_cast<const EdgeDescription *>(
      FuncDesc + 8 + NumLeafNodes * sizeof(InstrumentedNode));

  NumCalls = *reinterpret_cast<const uint32_t *>(
      FuncDesc + 8 + NumLeafNodes * sizeof(InstrumentedNode) +
      NumEdges * sizeof(EdgeDescription));
  DEBUG(reportNumber("NumCalls = ", NumCalls, 10));
  Calls = reinterpret_cast<const CallDescription *>(
      FuncDesc + 12 + NumLeafNodes * sizeof(InstrumentedNode) +
      NumEdges * sizeof(EdgeDescription));
  NumEntryNodes = *reinterpret_cast<const uint32_t *>(
      FuncDesc + 12 + NumLeafNodes * sizeof(InstrumentedNode) +
      NumEdges * sizeof(EdgeDescription) + NumCalls * sizeof(CallDescription));
  DEBUG(reportNumber("NumEntryNodes = ", NumEntryNodes, 10));
  EntryNodes = reinterpret_cast<const EntryNode *>(
      FuncDesc + 16 + NumLeafNodes * sizeof(InstrumentedNode) +
      NumEdges * sizeof(EdgeDescription) + NumCalls * sizeof(CallDescription));
}
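
// The serialized layout walked above is, in order:
//   [NumLeafNodes:4][LeafNodes...][NumEdges:4][Edges...]
//   [NumCalls:4][Calls...][NumEntryNodes:4][EntryNodes...]
// which is also why getSize() accounts for 16 bytes of counters on top of the
// four arrays.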

/// Read and mmap descriptions written by BOLT from the executable's notes
/// section
#if defined(HAVE_ELF_H) and !defined(__APPLE__)

void *__attribute__((noinline)) __get_pc() {
  return __builtin_extract_return_addr(__builtin_return_address(0));
}

/// Parse a string of the form "<hex1>-<hex2>" into the hex pair
/// <StartAddress, EndAddress>
bool parseAddressRange(const char *Str, uint64_t &StartAddress,
                       uint64_t &EndAddress) {
  if (!Str)
    return false;
  // Parsed string format: <hex1>-<hex2>
  StartAddress = hexToLong(Str, '-');
  while (*Str && *Str != '-')
    ++Str;
  if (!*Str)
    return false;
  ++Str; // swallow '-'
  EndAddress = hexToLong(Str);
  return true;
}
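
// Entry names under /proc/self/map_files/ have exactly this form, e.g.
// "559c8f1b4000-559c8f1b9000", so each directory entry parses directly into
// the address range of one mapping; getBinaryPath() below relies on this.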

/// Get the full path to the real binary by taking the current virtual address
/// and searching /proc/self/map_files for the mapping whose address range
/// contains it
static char *getBinaryPath() {
  const uint32_t BufSize = 1024;
  const uint32_t NameMax = 4096;
  const char DirPath[] = "/proc/self/map_files/";
  static char TargetPath[NameMax] = {};
  char Buf[BufSize];

  if (__bolt_instr_binpath[0] != '\0')
    return __bolt_instr_binpath;

  if (TargetPath[0] != '\0')
    return TargetPath;

  unsigned long CurAddr = (unsigned long)__get_pc();
  uint64_t FDdir = __open(DirPath, O_RDONLY,
                          /*mode=*/0666);
  assert(static_cast<int64_t>(FDdir) >= 0,
         "failed to open /proc/self/map_files");

  while (long Nread = __getdents64(FDdir, (struct dirent64 *)Buf, BufSize)) {
    assert(static_cast<int64_t>(Nread) != -1, "failed to get folder entries");

    struct dirent64 *d;
    for (long Bpos = 0; Bpos < Nread; Bpos += d->d_reclen) {
      d = (struct dirent64 *)(Buf + Bpos);

      uint64_t StartAddress, EndAddress;
      if (!parseAddressRange(d->d_name, StartAddress, EndAddress))
        continue;
      if (CurAddr < StartAddress || CurAddr > EndAddress)
        continue;
      char FindBuf[NameMax];
      char *C = strCopy(FindBuf, DirPath, NameMax);
      C = strCopy(C, d->d_name, NameMax - (C - FindBuf));
      *C = '\0';
      uint32_t Ret = __readlink(FindBuf, TargetPath, sizeof(TargetPath));
      assert(Ret != -1 && Ret != BufSize, "readlink error");
      TargetPath[Ret] = '\0';
      return TargetPath;
    }
  }
  return nullptr;
}

ProfileWriterContext readDescriptions() {
  ProfileWriterContext Result;
  char *BinPath = getBinaryPath();
  assert(BinPath && BinPath[0] != '\0', "failed to find binary path");

  uint64_t FD = __open(BinPath, O_RDONLY,
                       /*mode=*/0666);
  assert(static_cast<int64_t>(FD) >= 0, "failed to open binary path");

  Result.FileDesc = FD;

  // mmap our binary to memory
  uint64_t Size = __lseek(FD, 0, SEEK_END);
  uint8_t *BinContents = reinterpret_cast<uint8_t *>(
      __mmap(0, Size, PROT_READ, MAP_PRIVATE, FD, 0));
  assert(BinContents != MAP_FAILED, "readDescriptions: Failed to mmap self!");
  Result.MMapPtr = BinContents;
  Result.MMapSize = Size;
  Elf64_Ehdr *Hdr = reinterpret_cast<Elf64_Ehdr *>(BinContents);
  Elf64_Shdr *Shdr = reinterpret_cast<Elf64_Shdr *>(BinContents + Hdr->e_shoff);
  Elf64_Shdr *StringTblHeader = reinterpret_cast<Elf64_Shdr *>(
      BinContents + Hdr->e_shoff + Hdr->e_shstrndx * Hdr->e_shentsize);

  // Find .bolt.instr.tables with the data we need and set pointers to it
  for (int I = 0; I < Hdr->e_shnum; ++I) {
    char *SecName = reinterpret_cast<char *>(
        BinContents + StringTblHeader->sh_offset + Shdr->sh_name);
    if (compareStr(SecName, ".bolt.instr.tables", 64) != 0) {
      Shdr = reinterpret_cast<Elf64_Shdr *>(BinContents + Hdr->e_shoff +
                                            (I + 1) * Hdr->e_shentsize);
      continue;
    }
    // Actual contents of the ELF note start after offset 20 decimal:
    // Offset 0: Producer name size (4 bytes)
    // Offset 4: Contents size (4 bytes)
    // Offset 8: Note type (4 bytes)
    // Offset 12: Producer name (BOLT\0) (5 bytes + align to 4-byte boundary)
    // Offset 20: Contents
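    // The note contents emitted by Instrumentation::emitTablesAsELFNote()
    // then follow:
    //   Offset 20: IndCallDescSize (4 bytes), then IndCallDescriptions
    //   next:      IndCallTargetDescSize (4 bytes), then IndCallTargets
    //   next:      FuncDescSize (4 bytes), then FuncDescriptions
    //   next:      the string table with function names
    // matching the reads at offsets 20/24/28/32 below.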
    uint32_t IndCallDescSize =
        *reinterpret_cast<uint32_t *>(BinContents + Shdr->sh_offset + 20);
    uint32_t IndCallTargetDescSize = *reinterpret_cast<uint32_t *>(
        BinContents + Shdr->sh_offset + 24 + IndCallDescSize);
    uint32_t FuncDescSize =
        *reinterpret_cast<uint32_t *>(BinContents + Shdr->sh_offset + 28 +
                                      IndCallDescSize + IndCallTargetDescSize);
    Result.IndCallDescriptions = reinterpret_cast<IndCallDescription *>(
        BinContents + Shdr->sh_offset + 24);
    Result.IndCallTargets = reinterpret_cast<IndCallTargetDescription *>(
        BinContents + Shdr->sh_offset + 28 + IndCallDescSize);
    Result.FuncDescriptions = BinContents + Shdr->sh_offset + 32 +
                              IndCallDescSize + IndCallTargetDescSize;
    Result.Strings = reinterpret_cast<char *>(
        BinContents + Shdr->sh_offset + 32 + IndCallDescSize +
        IndCallTargetDescSize + FuncDescSize);
    return Result;
  }
  const char ErrMsg[] =
      "BOLT instrumentation runtime error: could not find section "
      ".bolt.instr.tables\n";
  reportError(ErrMsg, sizeof(ErrMsg));
  return Result;
}

#else

ProfileWriterContext readDescriptions() {
  ProfileWriterContext Result;
  uint8_t *Tables = _bolt_instr_tables_getter();
  uint32_t IndCallDescSize = *reinterpret_cast<uint32_t *>(Tables);
  uint32_t IndCallTargetDescSize =
      *reinterpret_cast<uint32_t *>(Tables + 4 + IndCallDescSize);
  uint32_t FuncDescSize = *reinterpret_cast<uint32_t *>(
      Tables + 8 + IndCallDescSize + IndCallTargetDescSize);
  Result.IndCallDescriptions =
      reinterpret_cast<IndCallDescription *>(Tables + 4);
  Result.IndCallTargets = reinterpret_cast<IndCallTargetDescription *>(
      Tables + 8 + IndCallDescSize);
  Result.FuncDescriptions =
      Tables + 12 + IndCallDescSize + IndCallTargetDescSize;
  Result.Strings = reinterpret_cast<char *>(
      Tables + 12 + IndCallDescSize + IndCallTargetDescSize + FuncDescSize);
  return Result;
}

#endif

#if !defined(__APPLE__)
/// Debug by printing overall metadata global numbers to check they are sane
void printStats(const ProfileWriterContext &Ctx) {
  char StatMsg[BufSize];
  char *StatPtr = StatMsg;
  StatPtr =
      strCopy(StatPtr,
              "\nBOLT INSTRUMENTATION RUNTIME STATISTICS\n\nIndCallDescSize: ");
  StatPtr = intToStr(StatPtr,
                     Ctx.FuncDescriptions -
                         reinterpret_cast<uint8_t *>(Ctx.IndCallDescriptions),
                     10);
  StatPtr = strCopy(StatPtr, "\nFuncDescSize: ");
  StatPtr = intToStr(
      StatPtr,
      reinterpret_cast<uint8_t *>(Ctx.Strings) - Ctx.FuncDescriptions, 10);
  StatPtr = strCopy(StatPtr, "\n__bolt_instr_num_ind_calls: ");
  StatPtr = intToStr(StatPtr, __bolt_instr_num_ind_calls, 10);
  StatPtr = strCopy(StatPtr, "\n__bolt_instr_num_funcs: ");
  StatPtr = intToStr(StatPtr, __bolt_instr_num_funcs, 10);
  StatPtr = strCopy(StatPtr, "\n");
  __write(2, StatMsg, StatPtr - StatMsg);
}
#endif


/// This is part of a simple CFG representation in memory, where we store
/// a dynamically sized array of input and output edges per node, and store
/// a dynamically sized array of nodes per graph. We also store the spanning
/// tree edges for that CFG in a separate array of nodes in
/// \p SpanningTreeNodes, while the regular nodes live in \p CFGNodes.
struct Edge {
  uint32_t Node; // Index in nodes array regarding the destination of this edge
  uint32_t ID;   // Edge index in an array comprising all edges of the graph
};

/// A regular graph node or a spanning tree node
struct Node {
  uint32_t NumInEdges{0};  // Input edge count used to size InEdges
  uint32_t NumOutEdges{0}; // Output edge count used to size OutEdges
  Edge *InEdges{nullptr};  // Created and managed by \p Graph
  Edge *OutEdges{nullptr}; // ditto
};

/// Main class for CFG representation in memory. Manages object creation and
/// destruction, populates an array of CFG nodes as well as corresponding
/// spanning tree nodes.
struct Graph {
  uint32_t NumNodes;
  Node *CFGNodes;
  Node *SpanningTreeNodes;
  uint64_t *EdgeFreqs;
  uint64_t *CallFreqs;
  BumpPtrAllocator &Alloc;
  const FunctionDescription &D;

  /// Reads a list of edges from function description \p D and builds
  /// the graph from it. Allocates several internal dynamic structures that are
  /// later destroyed by ~Graph() and uses \p Alloc. D.LeafNodes contain all
  /// spanning tree leaf node descriptions (their counters). They are the seed
  /// used to compute the rest of the missing edge counts in a bottom-up
  /// traversal of the spanning tree.
  Graph(BumpPtrAllocator &Alloc, const FunctionDescription &D,
        const uint64_t *Counters, ProfileWriterContext &Ctx);
  ~Graph();
  void dump() const;

private:
  void computeEdgeFrequencies(const uint64_t *Counters,
                              ProfileWriterContext &Ctx);
  void dumpEdgeFreqs() const;
};

Graph::Graph(BumpPtrAllocator &Alloc, const FunctionDescription &D,
             const uint64_t *Counters, ProfileWriterContext &Ctx)
    : Alloc(Alloc), D(D) {
  DEBUG(reportNumber("G = 0x", (uint64_t)this, 16));
  // First pass to determine number of nodes
  int32_t MaxNodes = -1;
  CallFreqs = nullptr;
  EdgeFreqs = nullptr;
  for (int I = 0; I < D.NumEdges; ++I) {
    if (static_cast<int32_t>(D.Edges[I].FromNode) > MaxNodes)
      MaxNodes = D.Edges[I].FromNode;
    if (static_cast<int32_t>(D.Edges[I].ToNode) > MaxNodes)
      MaxNodes = D.Edges[I].ToNode;
  }

  for (int I = 0; I < D.NumLeafNodes; ++I)
    if (static_cast<int32_t>(D.LeafNodes[I].Node) > MaxNodes)
      MaxNodes = D.LeafNodes[I].Node;

  for (int I = 0; I < D.NumCalls; ++I)
    if (static_cast<int32_t>(D.Calls[I].FromNode) > MaxNodes)
      MaxNodes = D.Calls[I].FromNode;

  // No nodes? Nothing to do
  if (MaxNodes < 0) {
    DEBUG(report("No nodes!\n"));
    CFGNodes = nullptr;
    SpanningTreeNodes = nullptr;
    NumNodes = 0;
    return;
  }
  ++MaxNodes;
  DEBUG(reportNumber("NumNodes = ", MaxNodes, 10));
  NumNodes = static_cast<uint32_t>(MaxNodes);

  // Initial allocations
  CFGNodes = new (Alloc) Node[MaxNodes];

  DEBUG(reportNumber("G->CFGNodes = 0x", (uint64_t)CFGNodes, 16));
  SpanningTreeNodes = new (Alloc) Node[MaxNodes];
  DEBUG(reportNumber("G->SpanningTreeNodes = 0x",
                     (uint64_t)SpanningTreeNodes, 16));

  // Figure out how much to allocate to each vector (in/out edge sets)
  for (int I = 0; I < D.NumEdges; ++I) {
    CFGNodes[D.Edges[I].FromNode].NumOutEdges++;
    CFGNodes[D.Edges[I].ToNode].NumInEdges++;
92816a497c6SRafael Auler if (D.Edges[I].Counter != 0xffffffff)
929cc4b2fb6SRafael Auler continue;
930cc4b2fb6SRafael Auler
93116a497c6SRafael Auler SpanningTreeNodes[D.Edges[I].FromNode].NumOutEdges++;
93216a497c6SRafael Auler SpanningTreeNodes[D.Edges[I].ToNode].NumInEdges++;
933cc4b2fb6SRafael Auler }
934cc4b2fb6SRafael Auler
935cc4b2fb6SRafael Auler // Allocate in/out edge sets
936cc4b2fb6SRafael Auler for (int I = 0; I < MaxNodes; ++I) {
937cc4b2fb6SRafael Auler if (CFGNodes[I].NumInEdges > 0)
938cc4b2fb6SRafael Auler CFGNodes[I].InEdges = new (Alloc) Edge[CFGNodes[I].NumInEdges];
939cc4b2fb6SRafael Auler if (CFGNodes[I].NumOutEdges > 0)
940cc4b2fb6SRafael Auler CFGNodes[I].OutEdges = new (Alloc) Edge[CFGNodes[I].NumOutEdges];
941cc4b2fb6SRafael Auler if (SpanningTreeNodes[I].NumInEdges > 0)
942cc4b2fb6SRafael Auler SpanningTreeNodes[I].InEdges =
943cc4b2fb6SRafael Auler new (Alloc) Edge[SpanningTreeNodes[I].NumInEdges];
944cc4b2fb6SRafael Auler if (SpanningTreeNodes[I].NumOutEdges > 0)
945cc4b2fb6SRafael Auler SpanningTreeNodes[I].OutEdges =
946cc4b2fb6SRafael Auler new (Alloc) Edge[SpanningTreeNodes[I].NumOutEdges];
947cc4b2fb6SRafael Auler CFGNodes[I].NumInEdges = 0;
948cc4b2fb6SRafael Auler CFGNodes[I].NumOutEdges = 0;
949cc4b2fb6SRafael Auler SpanningTreeNodes[I].NumInEdges = 0;
950cc4b2fb6SRafael Auler SpanningTreeNodes[I].NumOutEdges = 0;
951cc4b2fb6SRafael Auler }
952cc4b2fb6SRafael Auler
953cc4b2fb6SRafael Auler // Fill in/out edge sets
95416a497c6SRafael Auler for (int I = 0; I < D.NumEdges; ++I) {
95516a497c6SRafael Auler const uint32_t Src = D.Edges[I].FromNode;
95616a497c6SRafael Auler const uint32_t Dst = D.Edges[I].ToNode;
957cc4b2fb6SRafael Auler Edge *E = &CFGNodes[Src].OutEdges[CFGNodes[Src].NumOutEdges++];
958cc4b2fb6SRafael Auler E->Node = Dst;
959cc4b2fb6SRafael Auler E->ID = I;
960cc4b2fb6SRafael Auler
961cc4b2fb6SRafael Auler E = &CFGNodes[Dst].InEdges[CFGNodes[Dst].NumInEdges++];
962cc4b2fb6SRafael Auler E->Node = Src;
963cc4b2fb6SRafael Auler E->ID = I;
964cc4b2fb6SRafael Auler
96516a497c6SRafael Auler if (D.Edges[I].Counter != 0xffffffff)
966cc4b2fb6SRafael Auler continue;
967cc4b2fb6SRafael Auler
968cc4b2fb6SRafael Auler E = &SpanningTreeNodes[Src]
969cc4b2fb6SRafael Auler .OutEdges[SpanningTreeNodes[Src].NumOutEdges++];
970cc4b2fb6SRafael Auler E->Node = Dst;
971cc4b2fb6SRafael Auler E->ID = I;
972cc4b2fb6SRafael Auler
973cc4b2fb6SRafael Auler E = &SpanningTreeNodes[Dst]
974cc4b2fb6SRafael Auler .InEdges[SpanningTreeNodes[Dst].NumInEdges++];
975cc4b2fb6SRafael Auler E->Node = Src;
976cc4b2fb6SRafael Auler E->ID = I;
977cc4b2fb6SRafael Auler }
97816a497c6SRafael Auler
97916a497c6SRafael Auler computeEdgeFrequencies(Counters, Ctx);
980cc4b2fb6SRafael Auler }
981cc4b2fb6SRafael Auler
982cc4b2fb6SRafael Auler Graph::~Graph() {
98316a497c6SRafael Auler if (CallFreqs)
98416a497c6SRafael Auler Alloc.deallocate(CallFreqs);
98516a497c6SRafael Auler if (EdgeFreqs)
98616a497c6SRafael Auler Alloc.deallocate(EdgeFreqs);
987cc4b2fb6SRafael Auler for (int I = NumNodes - 1; I >= 0; --I) {
988cc4b2fb6SRafael Auler if (SpanningTreeNodes[I].OutEdges)
989cc4b2fb6SRafael Auler Alloc.deallocate(SpanningTreeNodes[I].OutEdges);
990cc4b2fb6SRafael Auler if (SpanningTreeNodes[I].InEdges)
991cc4b2fb6SRafael Auler Alloc.deallocate(SpanningTreeNodes[I].InEdges);
992cc4b2fb6SRafael Auler if (CFGNodes[I].OutEdges)
993cc4b2fb6SRafael Auler Alloc.deallocate(CFGNodes[I].OutEdges);
994cc4b2fb6SRafael Auler if (CFGNodes[I].InEdges)
995cc4b2fb6SRafael Auler Alloc.deallocate(CFGNodes[I].InEdges);
996cc4b2fb6SRafael Auler }
997cc4b2fb6SRafael Auler if (SpanningTreeNodes)
998cc4b2fb6SRafael Auler Alloc.deallocate(SpanningTreeNodes);
999cc4b2fb6SRafael Auler if (CFGNodes)
1000cc4b2fb6SRafael Auler Alloc.deallocate(CFGNodes);
1001cc4b2fb6SRafael Auler }
1002cc4b2fb6SRafael Auler
1003cc4b2fb6SRafael Auler void Graph::dump() const {
1004cc4b2fb6SRafael Auler reportNumber("Dumping graph with number of nodes: ", NumNodes, 10);
1005cc4b2fb6SRafael Auler report(" Full graph:\n");
1006cc4b2fb6SRafael Auler for (int I = 0; I < NumNodes; ++I) {
1007cc4b2fb6SRafael Auler const Node *N = &CFGNodes[I];
1008cc4b2fb6SRafael Auler reportNumber(" Node #", I, 10);
1009cc4b2fb6SRafael Auler reportNumber(" InEdges total ", N->NumInEdges, 10);
1010cc4b2fb6SRafael Auler for (int J = 0; J < N->NumInEdges; ++J)
1011cc4b2fb6SRafael Auler reportNumber(" ", N->InEdges[J].Node, 10);
1012cc4b2fb6SRafael Auler reportNumber(" OutEdges total ", N->NumOutEdges, 10);
1013cc4b2fb6SRafael Auler for (int J = 0; J < N->NumOutEdges; ++J)
1014cc4b2fb6SRafael Auler reportNumber(" ", N->OutEdges[J].Node, 10);
1015cc4b2fb6SRafael Auler report("\n");
1016cc4b2fb6SRafael Auler }
1017cc4b2fb6SRafael Auler report(" Spanning tree:\n");
1018cc4b2fb6SRafael Auler for (int I = 0; I < NumNodes; ++I) {
1019cc4b2fb6SRafael Auler const Node *N = &SpanningTreeNodes[I];
1020cc4b2fb6SRafael Auler reportNumber(" Node #", I, 10);
1021cc4b2fb6SRafael Auler reportNumber(" InEdges total ", N->NumInEdges, 10);
1022cc4b2fb6SRafael Auler for (int J = 0; J < N->NumInEdges; ++J)
1023cc4b2fb6SRafael Auler reportNumber(" ", N->InEdges[J].Node, 10);
1024cc4b2fb6SRafael Auler reportNumber(" OutEdges total ", N->NumOutEdges, 10);
1025cc4b2fb6SRafael Auler for (int J = 0; J < N->NumOutEdges; ++J)
1026cc4b2fb6SRafael Auler reportNumber(" ", N->OutEdges[J].Node, 10);
1027cc4b2fb6SRafael Auler report("\n");
1028cc4b2fb6SRafael Auler }
1029cc4b2fb6SRafael Auler }
1030cc4b2fb6SRafael Auler
103116a497c6SRafael Auler void Graph::dumpEdgeFreqs() const {
103216a497c6SRafael Auler reportNumber(
103316a497c6SRafael Auler "Dumping edge frequencies for graph with num edges: ", D.NumEdges, 10);
103416a497c6SRafael Auler for (int I = 0; I < D.NumEdges; ++I) {
103516a497c6SRafael Auler reportNumber("* Src: ", D.Edges[I].FromNode, 10);
103616a497c6SRafael Auler reportNumber(" Dst: ", D.Edges[I].ToNode, 10);
1037cc4b2fb6SRafael Auler reportNumber(" Cnt: ", EdgeFreqs[I], 10);
1038cc4b2fb6SRafael Auler }
1039cc4b2fb6SRafael Auler }
1040cc4b2fb6SRafael Auler
104116a497c6SRafael Auler /// Auxiliary map structure for fast lookups of which calls map to each node of
104216a497c6SRafael Auler /// the function CFG
104316a497c6SRafael Auler struct NodeToCallsMap {
104416a497c6SRafael Auler struct MapEntry {
104516a497c6SRafael Auler uint32_t NumCalls;
104616a497c6SRafael Auler uint32_t *Calls;
104716a497c6SRafael Auler };
104816a497c6SRafael Auler MapEntry *Entries;
104916a497c6SRafael Auler BumpPtrAllocator &Alloc;
105016a497c6SRafael Auler const uint32_t NumNodes;
1051cc4b2fb6SRafael Auler
105216a497c6SRafael Auler   NodeToCallsMap(BumpPtrAllocator &Alloc, const FunctionDescription &D,
105316a497c6SRafael Auler uint32_t NumNodes)
105416a497c6SRafael Auler : Alloc(Alloc), NumNodes(NumNodes) {
105516a497c6SRafael Auler Entries = new (Alloc, 0) MapEntry[NumNodes];
105616a497c6SRafael Auler for (int I = 0; I < D.NumCalls; ++I) {
105716a497c6SRafael Auler DEBUG(reportNumber("Registering call in node ", D.Calls[I].FromNode, 10));
105816a497c6SRafael Auler ++Entries[D.Calls[I].FromNode].NumCalls;
105916a497c6SRafael Auler }
106016a497c6SRafael Auler for (int I = 0; I < NumNodes; ++I) {
106116a497c6SRafael Auler Entries[I].Calls = Entries[I].NumCalls ? new (Alloc)
106216a497c6SRafael Auler uint32_t[Entries[I].NumCalls]
106316a497c6SRafael Auler : nullptr;
106416a497c6SRafael Auler Entries[I].NumCalls = 0;
106516a497c6SRafael Auler }
106616a497c6SRafael Auler for (int I = 0; I < D.NumCalls; ++I) {
1067c7306cc2SAmir Ayupov MapEntry &Entry = Entries[D.Calls[I].FromNode];
106816a497c6SRafael Auler Entry.Calls[Entry.NumCalls++] = I;
106916a497c6SRafael Auler }
107016a497c6SRafael Auler }
107116a497c6SRafael Auler
107216a497c6SRafael Auler   /// Set the frequency of all calls in node \p NodeID to \p Freq. However, if
107316a497c6SRafael Auler   /// a call has its own counter and does not depend on the basic block
107416a497c6SRafael Auler   /// counter, it has landing pads and may throw exceptions. In this
107516a497c6SRafael Auler   /// case, set its frequency from its counter and return the maximum
107616a497c6SRafael Auler /// value observed in such counters. This will be used as the new frequency
107716a497c6SRafael Auler /// at basic block entry. This is used to fix the CFG edge frequencies in the
107816a497c6SRafael Auler /// presence of exceptions.
107916a497c6SRafael Auler   uint64_t visitAllCallsIn(uint32_t NodeID, uint64_t Freq, uint64_t *CallFreqs,
108016a497c6SRafael Auler const FunctionDescription &D,
108116a497c6SRafael Auler const uint64_t *Counters,
108216a497c6SRafael Auler ProfileWriterContext &Ctx) const {
1083c7306cc2SAmir Ayupov const MapEntry &Entry = Entries[NodeID];
108416a497c6SRafael Auler uint64_t MaxValue = 0ull;
108516a497c6SRafael Auler for (int I = 0, E = Entry.NumCalls; I != E; ++I) {
1086c7306cc2SAmir Ayupov const uint32_t CallID = Entry.Calls[I];
108716a497c6SRafael Auler DEBUG(reportNumber(" Setting freq for call ID: ", CallID, 10));
1088c7306cc2SAmir Ayupov const CallDescription &CallDesc = D.Calls[CallID];
108916a497c6SRafael Auler if (CallDesc.Counter == 0xffffffff) {
109016a497c6SRafael Auler CallFreqs[CallID] = Freq;
109116a497c6SRafael Auler DEBUG(reportNumber(" with : ", Freq, 10));
109216a497c6SRafael Auler } else {
1093c7306cc2SAmir Ayupov const uint64_t CounterVal = Counters[CallDesc.Counter];
109416a497c6SRafael Auler CallFreqs[CallID] = CounterVal;
109516a497c6SRafael Auler MaxValue = CounterVal > MaxValue ? CounterVal : MaxValue;
109616a497c6SRafael Auler DEBUG(reportNumber(" with (private counter) : ", CounterVal, 10));
109716a497c6SRafael Auler }
109816a497c6SRafael Auler DEBUG(reportNumber(" Address: 0x", CallDesc.TargetAddress, 16));
109916a497c6SRafael Auler if (CallFreqs[CallID] > 0)
110016a497c6SRafael Auler Ctx.CallFlowTable->get(CallDesc.TargetAddress).Calls +=
110116a497c6SRafael Auler CallFreqs[CallID];
110216a497c6SRafael Auler }
110316a497c6SRafael Auler return MaxValue;
110416a497c6SRafael Auler }
110516a497c6SRafael Auler
110616a497c6SRafael Auler   ~NodeToCallsMap() {
1107883bf0e8SAmir Ayupov for (int I = NumNodes - 1; I >= 0; --I)
110816a497c6SRafael Auler if (Entries[I].Calls)
110916a497c6SRafael Auler Alloc.deallocate(Entries[I].Calls);
111016a497c6SRafael Auler Alloc.deallocate(Entries);
111116a497c6SRafael Auler }
111216a497c6SRafael Auler };
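
// The constructor above builds the node -> call-IDs index with the classic
// two-pass "count, allocate, fill" pattern, avoiding any resizing. A rough
// standard-library equivalent, purely illustrative (hypothetical names; the
// runtime itself cannot use the STL):
//
//   #include <cstdint>
//   #include <vector>
//
//   std::vector<std::vector<uint32_t>>
//   buildNodeToCalls(uint32_t NumNodes, const std::vector<uint32_t> &CallNode) {
//     std::vector<uint32_t> Count(NumNodes, 0);
//     for (uint32_t Node : CallNode)        // pass 1: count calls per node
//       ++Count[Node];
//     std::vector<std::vector<uint32_t>> Map(NumNodes);
//     for (uint32_t I = 0; I < NumNodes; ++I)
//       Map[I].reserve(Count[I]);           // pass 2: size each bucket once
//     for (size_t ID = 0; ID < CallNode.size(); ++ID)
//       Map[CallNode[ID]].push_back(static_cast<uint32_t>(ID)); // pass 3: fill
//     return Map;
//   }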
111316a497c6SRafael Auler
111416a497c6SRafael Auler /// Fill an array with the frequency of each edge in the function represented
111516a497c6SRafael Auler /// by G, as well as another array for each call.
111616a497c6SRafael Auler void Graph::computeEdgeFrequencies(const uint64_t *Counters,
111716a497c6SRafael Auler ProfileWriterContext &Ctx) {
111816a497c6SRafael Auler if (NumNodes == 0)
111916a497c6SRafael Auler return;
112016a497c6SRafael Auler
112116a497c6SRafael Auler EdgeFreqs = D.NumEdges ? new (Alloc, 0) uint64_t [D.NumEdges] : nullptr;
112216a497c6SRafael Auler CallFreqs = D.NumCalls ? new (Alloc, 0) uint64_t [D.NumCalls] : nullptr;
112316a497c6SRafael Auler
112416a497c6SRafael Auler   // Set up a lookup for the calls present in each node (basic block)
112516a497c6SRafael Auler NodeToCallsMap *CallMap = new (Alloc) NodeToCallsMap(Alloc, D, NumNodes);
1126cc4b2fb6SRafael Auler
1127cc4b2fb6SRafael Auler   // Perform a bottom-up, post-order traversal of the spanning tree in G. Edges in the
1128cc4b2fb6SRafael Auler // spanning tree don't have explicit counters. We must infer their value using
1129cc4b2fb6SRafael Auler // a linear combination of other counters (sum of counters of the outgoing
1130cc4b2fb6SRafael Auler // edges minus sum of counters of the incoming edges).
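  // Worked example with made-up numbers: if node B has outgoing edges B->C
  // (count 7) and B->D (count 3), plus incoming edges A->B (spanning tree,
  // not instrumented) and E->B (count 4), then once C and D are resolved B's
  // frequency is 7 + 3 = 10 and the inferred count for A->B is 10 - 4 = 6.
  // Negative results are clamped to zero further below.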
113116a497c6SRafael Auler uint32_t *Stack = new (Alloc) uint32_t [NumNodes];
1132cc4b2fb6SRafael Auler uint32_t StackTop = 0;
1133cc4b2fb6SRafael Auler enum Status : uint8_t { S_NEW = 0, S_VISITING, S_VISITED };
113416a497c6SRafael Auler Status *Visited = new (Alloc, 0) Status[NumNodes];
113516a497c6SRafael Auler uint64_t *LeafFrequency = new (Alloc, 0) uint64_t[NumNodes];
113616a497c6SRafael Auler uint64_t *EntryAddress = new (Alloc, 0) uint64_t[NumNodes];
1137cc4b2fb6SRafael Auler
1138cc4b2fb6SRafael Auler   // Set up a fast lookup for the frequency of leaf nodes, which have special
1139cc4b2fb6SRafael Auler // basic block frequency instrumentation (they are not edge profiled).
114016a497c6SRafael Auler for (int I = 0; I < D.NumLeafNodes; ++I) {
114116a497c6SRafael Auler LeafFrequency[D.LeafNodes[I].Node] = Counters[D.LeafNodes[I].Counter];
1142cc4b2fb6SRafael Auler DEBUG({
114316a497c6SRafael Auler if (Counters[D.LeafNodes[I].Counter] > 0) {
114416a497c6SRafael Auler reportNumber("Leaf Node# ", D.LeafNodes[I].Node, 10);
114516a497c6SRafael Auler reportNumber(" Counter: ", Counters[D.LeafNodes[I].Counter], 10);
1146cc4b2fb6SRafael Auler }
1147cc4b2fb6SRafael Auler });
114816a497c6SRafael Auler }
114916a497c6SRafael Auler for (int I = 0; I < D.NumEntryNodes; ++I) {
115016a497c6SRafael Auler EntryAddress[D.EntryNodes[I].Node] = D.EntryNodes[I].Address;
115116a497c6SRafael Auler DEBUG({
115216a497c6SRafael Auler reportNumber("Entry Node# ", D.EntryNodes[I].Node, 10);
115316a497c6SRafael Auler reportNumber(" Address: ", D.EntryNodes[I].Address, 16);
115416a497c6SRafael Auler });
1155cc4b2fb6SRafael Auler }
1156cc4b2fb6SRafael Auler // Add all root nodes to the stack
1157883bf0e8SAmir Ayupov for (int I = 0; I < NumNodes; ++I)
115816a497c6SRafael Auler if (SpanningTreeNodes[I].NumInEdges == 0)
1159cc4b2fb6SRafael Auler Stack[StackTop++] = I;
1160883bf0e8SAmir Ayupov
1161cc4b2fb6SRafael Auler // Empty stack?
1162cc4b2fb6SRafael Auler if (StackTop == 0) {
116316a497c6SRafael Auler DEBUG(report("Empty stack!\n"));
116416a497c6SRafael Auler Alloc.deallocate(EntryAddress);
1165cc4b2fb6SRafael Auler Alloc.deallocate(LeafFrequency);
1166cc4b2fb6SRafael Auler Alloc.deallocate(Visited);
1167cc4b2fb6SRafael Auler Alloc.deallocate(Stack);
116816a497c6SRafael Auler CallMap->~NodeToCallsMap();
116916a497c6SRafael Auler Alloc.deallocate(CallMap);
117016a497c6SRafael Auler if (CallFreqs)
117116a497c6SRafael Auler Alloc.deallocate(CallFreqs);
117216a497c6SRafael Auler if (EdgeFreqs)
117316a497c6SRafael Auler Alloc.deallocate(EdgeFreqs);
117416a497c6SRafael Auler EdgeFreqs = nullptr;
117516a497c6SRafael Auler CallFreqs = nullptr;
117616a497c6SRafael Auler return;
1177cc4b2fb6SRafael Auler }
1178cc4b2fb6SRafael Auler // Add all known edge counts, will infer the rest
117916a497c6SRafael Auler for (int I = 0; I < D.NumEdges; ++I) {
118016a497c6SRafael Auler const uint32_t C = D.Edges[I].Counter;
1181cc4b2fb6SRafael Auler if (C == 0xffffffff) // inferred counter - we will compute its value
1182cc4b2fb6SRafael Auler continue;
118316a497c6SRafael Auler EdgeFreqs[I] = Counters[C];
1184cc4b2fb6SRafael Auler }
1185cc4b2fb6SRafael Auler
1186cc4b2fb6SRafael Auler while (StackTop > 0) {
1187cc4b2fb6SRafael Auler const uint32_t Cur = Stack[--StackTop];
1188cc4b2fb6SRafael Auler DEBUG({
1189cc4b2fb6SRafael Auler if (Visited[Cur] == S_VISITING)
1190cc4b2fb6SRafael Auler report("(visiting) ");
1191cc4b2fb6SRafael Auler else
1192cc4b2fb6SRafael Auler report("(new) ");
1193cc4b2fb6SRafael Auler reportNumber("Cur: ", Cur, 10);
1194cc4b2fb6SRafael Auler });
1195cc4b2fb6SRafael Auler
1196cc4b2fb6SRafael Auler // This shouldn't happen in a tree
1197cc4b2fb6SRafael Auler assert(Visited[Cur] != S_VISITED, "should not have visited nodes in stack");
1198cc4b2fb6SRafael Auler if (Visited[Cur] == S_NEW) {
1199cc4b2fb6SRafael Auler Visited[Cur] = S_VISITING;
1200cc4b2fb6SRafael Auler Stack[StackTop++] = Cur;
120116a497c6SRafael Auler assert(StackTop <= NumNodes, "stack grew too large");
120216a497c6SRafael Auler for (int I = 0, E = SpanningTreeNodes[Cur].NumOutEdges; I < E; ++I) {
120316a497c6SRafael Auler const uint32_t Succ = SpanningTreeNodes[Cur].OutEdges[I].Node;
1204cc4b2fb6SRafael Auler Stack[StackTop++] = Succ;
120516a497c6SRafael Auler assert(StackTop <= NumNodes, "stack grew too large");
1206cc4b2fb6SRafael Auler }
1207cc4b2fb6SRafael Auler continue;
1208cc4b2fb6SRafael Auler }
1209cc4b2fb6SRafael Auler Visited[Cur] = S_VISITED;
1210cc4b2fb6SRafael Auler
1211cc4b2fb6SRafael Auler // Establish our node frequency based on outgoing edges, which should all be
1212cc4b2fb6SRafael Auler // resolved by now.
1213cc4b2fb6SRafael Auler int64_t CurNodeFreq = LeafFrequency[Cur];
1214cc4b2fb6SRafael Auler // Not a leaf?
1215cc4b2fb6SRafael Auler if (!CurNodeFreq) {
121616a497c6SRafael Auler for (int I = 0, E = CFGNodes[Cur].NumOutEdges; I != E; ++I) {
121716a497c6SRafael Auler const uint32_t SuccEdge = CFGNodes[Cur].OutEdges[I].ID;
121816a497c6SRafael Auler CurNodeFreq += EdgeFreqs[SuccEdge];
1219cc4b2fb6SRafael Auler }
1220cc4b2fb6SRafael Auler }
122116a497c6SRafael Auler if (CurNodeFreq < 0)
122216a497c6SRafael Auler CurNodeFreq = 0;
122316a497c6SRafael Auler
122416a497c6SRafael Auler const uint64_t CallFreq = CallMap->visitAllCallsIn(
122516a497c6SRafael Auler Cur, CurNodeFreq > 0 ? CurNodeFreq : 0, CallFreqs, D, Counters, Ctx);
122616a497c6SRafael Auler
122716a497c6SRafael Auler // Exception handling affected our output flow? Fix with calls info
122816a497c6SRafael Auler DEBUG({
122916a497c6SRafael Auler if (CallFreq > CurNodeFreq)
123016a497c6SRafael Auler report("Bumping node frequency with call info\n");
123116a497c6SRafael Auler });
123216a497c6SRafael Auler CurNodeFreq = CallFreq > CurNodeFreq ? CallFreq : CurNodeFreq;
123316a497c6SRafael Auler
123416a497c6SRafael Auler if (CurNodeFreq > 0) {
123516a497c6SRafael Auler if (uint64_t Addr = EntryAddress[Cur]) {
123616a497c6SRafael Auler DEBUG(
123716a497c6SRafael Auler reportNumber(" Setting flow at entry point address 0x", Addr, 16));
123816a497c6SRafael Auler DEBUG(reportNumber(" with: ", CurNodeFreq, 10));
123916a497c6SRafael Auler Ctx.CallFlowTable->get(Addr).Val = CurNodeFreq;
124016a497c6SRafael Auler }
124116a497c6SRafael Auler }
124216a497c6SRafael Auler
124316a497c6SRafael Auler     // No parent? We reached a tree root; only the call-frequency update above applies.
1244883bf0e8SAmir Ayupov if (SpanningTreeNodes[Cur].NumInEdges == 0)
124516a497c6SRafael Auler continue;
124616a497c6SRafael Auler
124716a497c6SRafael Auler assert(SpanningTreeNodes[Cur].NumInEdges == 1, "must have 1 parent");
124816a497c6SRafael Auler const uint32_t ParentEdge = SpanningTreeNodes[Cur].InEdges[0].ID;
124916a497c6SRafael Auler
1250cc4b2fb6SRafael Auler // Calculate parent edge freq.
125116a497c6SRafael Auler int64_t ParentEdgeFreq = CurNodeFreq;
125216a497c6SRafael Auler for (int I = 0, E = CFGNodes[Cur].NumInEdges; I != E; ++I) {
125316a497c6SRafael Auler const uint32_t PredEdge = CFGNodes[Cur].InEdges[I].ID;
125416a497c6SRafael Auler ParentEdgeFreq -= EdgeFreqs[PredEdge];
1255cc4b2fb6SRafael Auler }
125616a497c6SRafael Auler
1257cc4b2fb6SRafael Auler // Sometimes the conservative CFG that BOLT builds will lead to incorrect
1258cc4b2fb6SRafael Auler // flow computation. For example, in a BB that transitively calls the exit
1259cc4b2fb6SRafael Auler // syscall, BOLT will add a fall-through successor even though it should not
1260cc4b2fb6SRafael Auler     // have any successors. So this block's execution count will likely be wrong. We
1261cc4b2fb6SRafael Auler // tolerate this imperfection since this case should be quite infrequent.
1262cc4b2fb6SRafael Auler if (ParentEdgeFreq < 0) {
126316a497c6SRafael Auler DEBUG(dumpEdgeFreqs());
1264cc4b2fb6SRafael Auler DEBUG(report("WARNING: incorrect flow"));
1265cc4b2fb6SRafael Auler ParentEdgeFreq = 0;
1266cc4b2fb6SRafael Auler }
1267cc4b2fb6SRafael Auler DEBUG(reportNumber(" Setting freq for ParentEdge: ", ParentEdge, 10));
1268cc4b2fb6SRafael Auler DEBUG(reportNumber(" with ParentEdgeFreq: ", ParentEdgeFreq, 10));
126916a497c6SRafael Auler EdgeFreqs[ParentEdge] = ParentEdgeFreq;
1270cc4b2fb6SRafael Auler }
1271cc4b2fb6SRafael Auler
127216a497c6SRafael Auler Alloc.deallocate(EntryAddress);
1273cc4b2fb6SRafael Auler Alloc.deallocate(LeafFrequency);
1274cc4b2fb6SRafael Auler Alloc.deallocate(Visited);
1275cc4b2fb6SRafael Auler Alloc.deallocate(Stack);
127616a497c6SRafael Auler CallMap->~NodeToCallsMap();
127716a497c6SRafael Auler Alloc.deallocate(CallMap);
127816a497c6SRafael Auler DEBUG(dumpEdgeFreqs());
1279cc4b2fb6SRafael Auler }
1280cc4b2fb6SRafael Auler
128116a497c6SRafael Auler /// Write to \p FD all of the edge profiles for function \p FuncDesc. Uses
128216a497c6SRafael Auler /// \p Alloc to allocate helper dynamic structures used to compute profile for
12831a2f8336Sspaette /// edges that we do not explicitly instrument.
128416a497c6SRafael Auler const uint8_t *writeFunctionProfile(int FD, ProfileWriterContext &Ctx,
128516a497c6SRafael Auler const uint8_t *FuncDesc,
128616a497c6SRafael Auler BumpPtrAllocator &Alloc) {
128716a497c6SRafael Auler const FunctionDescription F(FuncDesc);
128816a497c6SRafael Auler const uint8_t *next = FuncDesc + F.getSize();
1289cc4b2fb6SRafael Auler
1290a0dd5b05SAlexander Shaposhnikov #if !defined(__APPLE__)
1291a0dd5b05SAlexander Shaposhnikov uint64_t *bolt_instr_locations = __bolt_instr_locations;
1292a0dd5b05SAlexander Shaposhnikov #else
1293a0dd5b05SAlexander Shaposhnikov uint64_t *bolt_instr_locations = _bolt_instr_locations_getter();
1294a0dd5b05SAlexander Shaposhnikov #endif
1295a0dd5b05SAlexander Shaposhnikov
1296cc4b2fb6SRafael Auler // Skip funcs we know are cold
1297cc4b2fb6SRafael Auler #ifndef ENABLE_DEBUG
129816a497c6SRafael Auler uint64_t CountersFreq = 0;
1299883bf0e8SAmir Ayupov for (int I = 0; I < F.NumLeafNodes; ++I)
1300a0dd5b05SAlexander Shaposhnikov CountersFreq += bolt_instr_locations[F.LeafNodes[I].Counter];
1301883bf0e8SAmir Ayupov
130216a497c6SRafael Auler if (CountersFreq == 0) {
130316a497c6SRafael Auler for (int I = 0; I < F.NumEdges; ++I) {
130416a497c6SRafael Auler const uint32_t C = F.Edges[I].Counter;
130516a497c6SRafael Auler if (C == 0xffffffff)
130616a497c6SRafael Auler continue;
1307a0dd5b05SAlexander Shaposhnikov CountersFreq += bolt_instr_locations[C];
130816a497c6SRafael Auler }
130916a497c6SRafael Auler if (CountersFreq == 0) {
131016a497c6SRafael Auler for (int I = 0; I < F.NumCalls; ++I) {
131116a497c6SRafael Auler const uint32_t C = F.Calls[I].Counter;
131216a497c6SRafael Auler if (C == 0xffffffff)
131316a497c6SRafael Auler continue;
1314a0dd5b05SAlexander Shaposhnikov CountersFreq += bolt_instr_locations[C];
131516a497c6SRafael Auler }
131616a497c6SRafael Auler if (CountersFreq == 0)
1317cc4b2fb6SRafael Auler return next;
131816a497c6SRafael Auler }
131916a497c6SRafael Auler }
1320cc4b2fb6SRafael Auler #endif
1321cc4b2fb6SRafael Auler
1322a0dd5b05SAlexander Shaposhnikov Graph *G = new (Alloc) Graph(Alloc, F, bolt_instr_locations, Ctx);
1323cc4b2fb6SRafael Auler DEBUG(G->dump());
1324a0dd5b05SAlexander Shaposhnikov
132516a497c6SRafael Auler if (!G->EdgeFreqs && !G->CallFreqs) {
1326cc4b2fb6SRafael Auler G->~Graph();
1327cc4b2fb6SRafael Auler Alloc.deallocate(G);
1328cc4b2fb6SRafael Auler return next;
1329cc4b2fb6SRafael Auler }
1330cc4b2fb6SRafael Auler
133116a497c6SRafael Auler for (int I = 0; I < F.NumEdges; ++I) {
133216a497c6SRafael Auler const uint64_t Freq = G->EdgeFreqs[I];
1333cc4b2fb6SRafael Auler if (Freq == 0)
1334cc4b2fb6SRafael Auler continue;
133516a497c6SRafael Auler const EdgeDescription *Desc = &F.Edges[I];
1336cc4b2fb6SRafael Auler char LineBuf[BufSize];
1337cc4b2fb6SRafael Auler char *Ptr = LineBuf;
133816a497c6SRafael Auler Ptr = serializeLoc(Ctx, Ptr, Desc->From, BufSize);
133916a497c6SRafael Auler Ptr = serializeLoc(Ctx, Ptr, Desc->To, BufSize - (Ptr - LineBuf));
1340cc4b2fb6SRafael Auler Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 22);
1341cc4b2fb6SRafael Auler Ptr = intToStr(Ptr, Freq, 10);
1342cc4b2fb6SRafael Auler *Ptr++ = '\n';
1343cc4b2fb6SRafael Auler __write(FD, LineBuf, Ptr - LineBuf);
1344cc4b2fb6SRafael Auler }
1345cc4b2fb6SRafael Auler
134616a497c6SRafael Auler for (int I = 0; I < F.NumCalls; ++I) {
134716a497c6SRafael Auler const uint64_t Freq = G->CallFreqs[I];
134816a497c6SRafael Auler if (Freq == 0)
134916a497c6SRafael Auler continue;
135016a497c6SRafael Auler char LineBuf[BufSize];
135116a497c6SRafael Auler char *Ptr = LineBuf;
135216a497c6SRafael Auler const CallDescription *Desc = &F.Calls[I];
135316a497c6SRafael Auler Ptr = serializeLoc(Ctx, Ptr, Desc->From, BufSize);
135416a497c6SRafael Auler Ptr = serializeLoc(Ctx, Ptr, Desc->To, BufSize - (Ptr - LineBuf));
135516a497c6SRafael Auler Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
135616a497c6SRafael Auler Ptr = intToStr(Ptr, Freq, 10);
135716a497c6SRafael Auler *Ptr++ = '\n';
135816a497c6SRafael Auler __write(FD, LineBuf, Ptr - LineBuf);
135916a497c6SRafael Auler }
136016a497c6SRafael Auler
1361cc4b2fb6SRafael Auler G->~Graph();
1362cc4b2fb6SRafael Auler Alloc.deallocate(G);
1363cc4b2fb6SRafael Auler return next;
1364cc4b2fb6SRafael Auler }
1365cc4b2fb6SRafael Auler
1366a0dd5b05SAlexander Shaposhnikov #if !defined(__APPLE__)
136716a497c6SRafael Auler const IndCallTargetDescription *
136816a497c6SRafael Auler ProfileWriterContext::lookupIndCallTarget(uint64_t Target) const {
136916a497c6SRafael Auler uint32_t B = 0;
137016a497c6SRafael Auler uint32_t E = __bolt_instr_num_ind_targets;
137116a497c6SRafael Auler if (E == 0)
137216a497c6SRafael Auler return nullptr;
137316a497c6SRafael Auler do {
137416a497c6SRafael Auler uint32_t I = (E - B) / 2 + B;
137516a497c6SRafael Auler if (IndCallTargets[I].Address == Target)
137616a497c6SRafael Auler return &IndCallTargets[I];
137716a497c6SRafael Auler if (IndCallTargets[I].Address < Target)
137816a497c6SRafael Auler B = I + 1;
137916a497c6SRafael Auler else
138016a497c6SRafael Auler E = I;
138116a497c6SRafael Auler } while (B < E);
138216a497c6SRafael Auler return nullptr;
1383cc4b2fb6SRafael Auler }
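
// The lookup above is a plain binary search; it assumes the IndCallTargets
// array is emitted sorted by address. A standalone sketch of the same search
// (hypothetical names, illustrative only):
//
//   #include <cstdint>
//
//   struct SketchTarget { uint64_t Address; };
//
//   const SketchTarget *lookup(const SketchTarget *Targets, uint32_t N,
//                              uint64_t Target) {
//     uint32_t B = 0, E = N;
//     while (B < E) {
//       const uint32_t I = B + (E - B) / 2;  // midpoint without overflow
//       if (Targets[I].Address == Target)
//         return &Targets[I];
//       if (Targets[I].Address < Target)
//         B = I + 1;
//       else
//         E = I;
//     }
//     return nullptr;                        // not a known call target
//   }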
138462aa74f8SRafael Auler
138516a497c6SRafael Auler /// Write a single indirect call <src, target> pair to the fdata file
138616a497c6SRafael Auler void visitIndCallCounter(IndirectCallHashTable::MapEntry &Entry,
138716a497c6SRafael Auler int FD, int CallsiteID,
138816a497c6SRafael Auler ProfileWriterContext *Ctx) {
138916a497c6SRafael Auler if (Entry.Val == 0)
139016a497c6SRafael Auler return;
139116a497c6SRafael Auler DEBUG(reportNumber("Target func 0x", Entry.Key, 16));
139216a497c6SRafael Auler DEBUG(reportNumber("Target freq: ", Entry.Val, 10));
139316a497c6SRafael Auler const IndCallDescription *CallsiteDesc =
139416a497c6SRafael Auler &Ctx->IndCallDescriptions[CallsiteID];
139516a497c6SRafael Auler const IndCallTargetDescription *TargetDesc =
1396a86dd9aeSDenis Revunov Ctx->lookupIndCallTarget(Entry.Key - TextBaseAddress);
139716a497c6SRafael Auler if (!TargetDesc) {
139816a497c6SRafael Auler DEBUG(report("Failed to lookup indirect call target\n"));
1399cc4b2fb6SRafael Auler char LineBuf[BufSize];
140062aa74f8SRafael Auler char *Ptr = LineBuf;
140116a497c6SRafael Auler Ptr = serializeLoc(*Ctx, Ptr, *CallsiteDesc, BufSize);
140216a497c6SRafael Auler Ptr = strCopy(Ptr, "0 [unknown] 0 0 ", BufSize - (Ptr - LineBuf) - 40);
140316a497c6SRafael Auler Ptr = intToStr(Ptr, Entry.Val, 10);
140416a497c6SRafael Auler *Ptr++ = '\n';
140516a497c6SRafael Auler __write(FD, LineBuf, Ptr - LineBuf);
140616a497c6SRafael Auler return;
140716a497c6SRafael Auler }
140816a497c6SRafael Auler Ctx->CallFlowTable->get(TargetDesc->Address).Calls += Entry.Val;
140916a497c6SRafael Auler char LineBuf[BufSize];
141016a497c6SRafael Auler char *Ptr = LineBuf;
141116a497c6SRafael Auler Ptr = serializeLoc(*Ctx, Ptr, *CallsiteDesc, BufSize);
141216a497c6SRafael Auler Ptr = serializeLoc(*Ctx, Ptr, TargetDesc->Loc, BufSize - (Ptr - LineBuf));
1413cc4b2fb6SRafael Auler Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
141416a497c6SRafael Auler Ptr = intToStr(Ptr, Entry.Val, 10);
141562aa74f8SRafael Auler *Ptr++ = '\n';
1416821480d2SRafael Auler __write(FD, LineBuf, Ptr - LineBuf);
141762aa74f8SRafael Auler }
1418cc4b2fb6SRafael Auler
141916a497c6SRafael Auler /// Write to \p FD all of the indirect call profiles.
142016a497c6SRafael Auler void writeIndirectCallProfile(int FD, ProfileWriterContext &Ctx) {
142116a497c6SRafael Auler for (int I = 0; I < __bolt_instr_num_ind_calls; ++I) {
142216a497c6SRafael Auler DEBUG(reportNumber("IndCallsite #", I, 10));
142316a497c6SRafael Auler GlobalIndCallCounters[I].forEachElement(visitIndCallCounter, FD, I, &Ctx);
142416a497c6SRafael Auler }
142516a497c6SRafael Auler }
142616a497c6SRafael Auler
142716a497c6SRafael Auler /// Check a single call flow for a callee versus all known callers. If there are
142816a497c6SRafael Auler /// fewer callers than the callee expects, write the difference with source
142916a497c6SRafael Auler /// [unknown] in the profile.
143016a497c6SRafael Auler void visitCallFlowEntry(CallFlowHashTable::MapEntry &Entry, int FD,
143116a497c6SRafael Auler ProfileWriterContext *Ctx) {
143216a497c6SRafael Auler DEBUG(reportNumber("Call flow entry address: 0x", Entry.Key, 16));
143316a497c6SRafael Auler DEBUG(reportNumber("Calls: ", Entry.Calls, 10));
143416a497c6SRafael Auler DEBUG(reportNumber("Reported entry frequency: ", Entry.Val, 10));
143516a497c6SRafael Auler DEBUG({
143616a497c6SRafael Auler if (Entry.Calls > Entry.Val)
143716a497c6SRafael Auler report(" More calls than expected!\n");
143816a497c6SRafael Auler });
143916a497c6SRafael Auler if (Entry.Val <= Entry.Calls)
144016a497c6SRafael Auler return;
144116a497c6SRafael Auler DEBUG(reportNumber(
144216a497c6SRafael Auler " Balancing calls with traffic: ", Entry.Val - Entry.Calls, 10));
144316a497c6SRafael Auler const IndCallTargetDescription *TargetDesc =
144416a497c6SRafael Auler Ctx->lookupIndCallTarget(Entry.Key);
144516a497c6SRafael Auler if (!TargetDesc) {
144616a497c6SRafael Auler // There is probably something wrong with this callee and this should be
144716a497c6SRafael Auler // investigated, but I don't want to assert and lose all data collected.
144816a497c6SRafael Auler DEBUG(report("WARNING: failed to look up call target!\n"));
144916a497c6SRafael Auler return;
145016a497c6SRafael Auler }
145116a497c6SRafael Auler char LineBuf[BufSize];
145216a497c6SRafael Auler char *Ptr = LineBuf;
145316a497c6SRafael Auler Ptr = strCopy(Ptr, "0 [unknown] 0 ", BufSize);
145416a497c6SRafael Auler Ptr = serializeLoc(*Ctx, Ptr, TargetDesc->Loc, BufSize - (Ptr - LineBuf));
145516a497c6SRafael Auler Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25);
145616a497c6SRafael Auler Ptr = intToStr(Ptr, Entry.Val - Entry.Calls, 10);
145716a497c6SRafael Auler *Ptr++ = '\n';
145816a497c6SRafael Auler __write(FD, LineBuf, Ptr - LineBuf);
145916a497c6SRafael Auler }
146016a497c6SRafael Auler
146116a497c6SRafael Auler /// Open fdata file for writing and return a valid file descriptor, aborting
146216a497c6SRafael Auler /// the program upon failure.
146316a497c6SRafael Auler int openProfile() {
146416a497c6SRafael Auler // Build the profile name string by appending our PID
146516a497c6SRafael Auler char Buf[BufSize];
146616a497c6SRafael Auler uint64_t PID = __getpid();
1467*219ea267SHeewon Cho char *Ptr = strCopy(Buf, __bolt_instr_filename, BufSize);
146816a497c6SRafael Auler if (__bolt_instr_use_pid) {
146916a497c6SRafael Auler Ptr = strCopy(Ptr, ".", BufSize - (Ptr - Buf + 1));
147016a497c6SRafael Auler Ptr = intToStr(Ptr, PID, 10);
147116a497c6SRafael Auler Ptr = strCopy(Ptr, ".fdata", BufSize - (Ptr - Buf + 1));
147216a497c6SRafael Auler }
147316a497c6SRafael Auler *Ptr++ = '\0';
147460bbddf3SDenis Revunov uint64_t FD = __open(Buf, O_WRONLY | O_TRUNC | O_CREAT,
147516a497c6SRafael Auler /*mode=*/0666);
147616a497c6SRafael Auler if (static_cast<int64_t>(FD) < 0) {
147716a497c6SRafael Auler report("Error while trying to open profile file for writing: ");
147816a497c6SRafael Auler report(Buf);
147916a497c6SRafael Auler reportNumber("\nFailed with error number: 0x",
148016a497c6SRafael Auler 0 - static_cast<int64_t>(FD), 16);
148116a497c6SRafael Auler __exit(1);
148216a497c6SRafael Auler }
148316a497c6SRafael Auler return FD;
148416a497c6SRafael Auler }
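
// For reference, the file name built above is "<__bolt_instr_filename>" or
// "<__bolt_instr_filename>.<pid>.fdata" when the PID suffix is requested. A
// libc-based sketch of the same logic (illustrative only; the runtime uses
// its raw __open/__getpid wrappers plus strCopy/intToStr instead of libc):
//
//   #include <cstdio>
//   #include <fcntl.h>
//   #include <unistd.h>
//
//   int openProfileSketch(const char *Base, bool UsePid) {
//     char Buf[4096];
//     if (UsePid)
//       snprintf(Buf, sizeof(Buf), "%s.%d.fdata", Base, (int)getpid());
//     else
//       snprintf(Buf, sizeof(Buf), "%s", Base);
//     return open(Buf, O_WRONLY | O_TRUNC | O_CREAT, 0666);
//   }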
1485a0dd5b05SAlexander Shaposhnikov
1486a0dd5b05SAlexander Shaposhnikov #endif
1487a0dd5b05SAlexander Shaposhnikov
148816a497c6SRafael Auler } // anonymous namespace
148916a497c6SRafael Auler
1490a0dd5b05SAlexander Shaposhnikov #if !defined(__APPLE__)
1491a0dd5b05SAlexander Shaposhnikov
149216a497c6SRafael Auler /// Reset all counters in case you want to start profiling a new phase of your
149316a497c6SRafael Auler /// program independently of prior phases.
149416a497c6SRafael Auler /// The address of this function is printed by BOLT so that it can be called by
149516a497c6SRafael Auler /// any attached debugger at runtime. There is a useful one-liner for gdb:
149616a497c6SRafael Auler ///
149716a497c6SRafael Auler /// gdb -p $(pgrep -xo PROCESSNAME) -ex 'p ((void(*)())0xdeadbeef)()' \
149816a497c6SRafael Auler /// -ex 'set confirm off' -ex quit
149916a497c6SRafael Auler ///
150016a497c6SRafael Auler /// Where 0xdeadbeef is this function address and PROCESSNAME your binary file
150116a497c6SRafael Auler /// name.
150216a497c6SRafael Auler extern "C" void __bolt_instr_clear_counters() {
1503ea2182feSMaksim Panchenko memset(reinterpret_cast<char *>(__bolt_instr_locations), 0,
150416a497c6SRafael Auler __bolt_num_counters * 8);
1505883bf0e8SAmir Ayupov for (int I = 0; I < __bolt_instr_num_ind_calls; ++I)
150616a497c6SRafael Auler GlobalIndCallCounters[I].resetCounters();
150716a497c6SRafael Auler }
150816a497c6SRafael Auler
150916a497c6SRafael Auler /// This is the entry point for profile writing.
151016a497c6SRafael Auler /// There are three ways of getting here:
151116a497c6SRafael Auler ///
151216a497c6SRafael Auler /// * Program execution ended, finalization methods are running and BOLT
151316a497c6SRafael Auler /// hooked into FINI from your binary dynamic section;
151416a497c6SRafael Auler /// * You used the sleep timer option and during initialization we forked
15151a2f8336Sspaette /// a separate process that will call this function periodically;
151616a497c6SRafael Auler /// * BOLT prints this function address so you can attach a debugger and
151716a497c6SRafael Auler /// call this function directly to get your profile written to disk
151816a497c6SRafael Auler /// on demand.
151916a497c6SRafael Auler ///
1520ad79d517SVasily Leonenko extern "C" void __attribute((force_align_arg_pointer))
1521a7992981SDenis Revunov __bolt_instr_data_dump(int FD) {
152216a497c6SRafael Auler // Already dumping
152316a497c6SRafael Auler if (!GlobalWriteProfileMutex->acquire())
152416a497c6SRafael Auler return;
152516a497c6SRafael Auler
1526a7992981SDenis Revunov int ret = __lseek(FD, 0, SEEK_SET);
1527a7992981SDenis Revunov assert(ret == 0, "Failed to lseek!");
1528a7992981SDenis Revunov ret = __ftruncate(FD, 0);
1529a7992981SDenis Revunov assert(ret == 0, "Failed to ftruncate!");
153016a497c6SRafael Auler BumpPtrAllocator HashAlloc;
153116a497c6SRafael Auler HashAlloc.setMaxSize(0x6400000);
153216a497c6SRafael Auler ProfileWriterContext Ctx = readDescriptions();
153316a497c6SRafael Auler Ctx.CallFlowTable = new (HashAlloc, 0) CallFlowHashTable(HashAlloc);
153416a497c6SRafael Auler
153516a497c6SRafael Auler DEBUG(printStats(Ctx));
153616a497c6SRafael Auler
1537cc4b2fb6SRafael Auler BumpPtrAllocator Alloc;
1538eaf1b566SJakub Beránek Alloc.setMaxSize(0x6400000);
153916a497c6SRafael Auler const uint8_t *FuncDesc = Ctx.FuncDescriptions;
1540cc4b2fb6SRafael Auler for (int I = 0, E = __bolt_instr_num_funcs; I < E; ++I) {
154116a497c6SRafael Auler FuncDesc = writeFunctionProfile(FD, Ctx, FuncDesc, Alloc);
154216a497c6SRafael Auler Alloc.clear();
1543cc4b2fb6SRafael Auler DEBUG(reportNumber("FuncDesc now: ", (uint64_t)FuncDesc, 16));
1544cc4b2fb6SRafael Auler }
154516a497c6SRafael Auler assert(FuncDesc == (void *)Ctx.Strings,
1546cc4b2fb6SRafael Auler "FuncDesc ptr must be equal to stringtable");
1547cc4b2fb6SRafael Auler
154816a497c6SRafael Auler writeIndirectCallProfile(FD, Ctx);
154916a497c6SRafael Auler Ctx.CallFlowTable->forEachElement(visitCallFlowEntry, FD, &Ctx);
155016a497c6SRafael Auler
1551dcdd37fdSVladislav Khmelevsky __fsync(FD);
155216a497c6SRafael Auler __munmap(Ctx.MMapPtr, Ctx.MMapSize);
155316a497c6SRafael Auler __close(Ctx.FileDesc);
155416a497c6SRafael Auler HashAlloc.destroy();
155516a497c6SRafael Auler GlobalWriteProfileMutex->release();
155616a497c6SRafael Auler DEBUG(report("Finished writing profile.\n"));
155716a497c6SRafael Auler }
155816a497c6SRafael Auler
155916a497c6SRafael Auler /// Event loop for our child process spawned during setup to dump profile data
156016a497c6SRafael Auler /// at user-specified intervals
156116a497c6SRafael Auler void watchProcess() {
156216a497c6SRafael Auler timespec ts, rem;
156316a497c6SRafael Auler   uint64_t Elapsed = 0ull;
1564a7992981SDenis Revunov int FD = openProfile();
156576d346caSVladislav Khmelevsky uint64_t ppid;
156676d346caSVladislav Khmelevsky if (__bolt_instr_wait_forks) {
156776d346caSVladislav Khmelevsky // Store parent pgid
156876d346caSVladislav Khmelevsky ppid = -__getpgid(0);
156976d346caSVladislav Khmelevsky // And leave parent process group
157076d346caSVladislav Khmelevsky __setpgid(0, 0);
157176d346caSVladislav Khmelevsky } else {
157276d346caSVladislav Khmelevsky // Store parent pid
157376d346caSVladislav Khmelevsky ppid = __getppid();
157476d346caSVladislav Khmelevsky if (ppid == 1) {
157576d346caSVladislav Khmelevsky // Parent already dead
1576a7992981SDenis Revunov __bolt_instr_data_dump(FD);
157776d346caSVladislav Khmelevsky goto out;
157876d346caSVladislav Khmelevsky }
157976d346caSVladislav Khmelevsky }
158076d346caSVladislav Khmelevsky
158116a497c6SRafael Auler ts.tv_sec = 1;
158216a497c6SRafael Auler ts.tv_nsec = 0;
158316a497c6SRafael Auler while (1) {
158416a497c6SRafael Auler __nanosleep(&ts, &rem);
158576d346caSVladislav Khmelevsky // This means our parent process or all its forks are dead,
158676d346caSVladislav Khmelevsky // so no need for us to keep dumping.
158776d346caSVladislav Khmelevsky if (__kill(ppid, 0) < 0) {
158876d346caSVladislav Khmelevsky if (__bolt_instr_no_counters_clear)
1589a7992981SDenis Revunov __bolt_instr_data_dump(FD);
159016a497c6SRafael Auler break;
159116a497c6SRafael Auler }
159276d346caSVladislav Khmelevsky
159316a497c6SRafael Auler     if (++Elapsed < __bolt_instr_sleep_time)
159416a497c6SRafael Auler continue;
159576d346caSVladislav Khmelevsky
159616a497c6SRafael Auler     Elapsed = 0;
1597a7992981SDenis Revunov __bolt_instr_data_dump(FD);
159876d346caSVladislav Khmelevsky if (__bolt_instr_no_counters_clear == false)
159916a497c6SRafael Auler __bolt_instr_clear_counters();
160016a497c6SRafael Auler }
160176d346caSVladislav Khmelevsky
160276d346caSVladislav Khmelevsky out:;
160316a497c6SRafael Auler DEBUG(report("My parent process is dead, bye!\n"));
1604a7992981SDenis Revunov __close(FD);
160516a497c6SRafael Auler __exit(0);
160616a497c6SRafael Auler }
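
// A condensed, libc-based sketch of the watcher loop above (hypothetical
// names, illustrative only): sleep in one-second ticks, use kill(pid, 0) as a
// pure liveness probe, and dump the profile every DumpEverySec seconds.
//
//   #include <signal.h>
//   #include <sys/types.h>
//   #include <time.h>
//
//   void watchSketch(pid_t Parent, unsigned DumpEverySec, void (*Dump)()) {
//     timespec Ts{/*tv_sec=*/1, /*tv_nsec=*/0};
//     unsigned Elapsed = 0;
//     while (true) {
//       nanosleep(&Ts, nullptr);
//       if (kill(Parent, 0) < 0)  // signal 0 delivers nothing; checks liveness
//         break;                  // parent (or its process group) is gone
//       if (++Elapsed < DumpEverySec)
//         continue;
//       Elapsed = 0;
//       Dump();                   // periodic profile dump
//     }
//   }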
160716a497c6SRafael Auler
160816a497c6SRafael Auler extern "C" void __bolt_instr_indirect_call();
160916a497c6SRafael Auler extern "C" void __bolt_instr_indirect_tailcall();
161016a497c6SRafael Auler
161116a497c6SRafael Auler /// Initialization code
1612ad79d517SVasily Leonenko extern "C" void __attribute((force_align_arg_pointer)) __bolt_instr_setup() {
161358a16d84SAmir Ayupov __bolt_ind_call_counter_func_pointer = __bolt_instr_indirect_call;
161458a16d84SAmir Ayupov __bolt_ind_tailcall_counter_func_pointer = __bolt_instr_indirect_tailcall;
1615a86dd9aeSDenis Revunov TextBaseAddress = getTextBaseAddress();
161658a16d84SAmir Ayupov
161716a497c6SRafael Auler const uint64_t CountersStart =
161816a497c6SRafael Auler reinterpret_cast<uint64_t>(&__bolt_instr_locations[0]);
161916a497c6SRafael Auler const uint64_t CountersEnd = alignTo(
162016a497c6SRafael Auler reinterpret_cast<uint64_t>(&__bolt_instr_locations[__bolt_num_counters]),
162116a497c6SRafael Auler 0x1000);
162216a497c6SRafael Auler DEBUG(reportNumber("replace mmap start: ", CountersStart, 16));
162316a497c6SRafael Auler DEBUG(reportNumber("replace mmap stop: ", CountersEnd, 16));
162416a497c6SRafael Auler assert(CountersEnd > CountersStart, "no counters");
162558a16d84SAmir Ayupov
162658a16d84SAmir Ayupov const bool Shared = !__bolt_instr_use_pid;
162758a16d84SAmir Ayupov const uint64_t MapPrivateOrShared = Shared ? MAP_SHARED : MAP_PRIVATE;
162858a16d84SAmir Ayupov
16298b23a853SDenis Revunov void *Ret =
16308b23a853SDenis Revunov __mmap(CountersStart, CountersEnd - CountersStart, PROT_READ | PROT_WRITE,
163158a16d84SAmir Ayupov MAP_ANONYMOUS | MapPrivateOrShared | MAP_FIXED, -1, 0);
16328ed172cfSDenis Revunov assert(Ret != MAP_FAILED, "__bolt_instr_setup: Failed to mmap counters!");
163358a16d84SAmir Ayupov
16340cc19b56SDenis Revunov GlobalMetadataStorage = __mmap(0, 4096, PROT_READ | PROT_WRITE,
16350cc19b56SDenis Revunov MapPrivateOrShared | MAP_ANONYMOUS, -1, 0);
16360cc19b56SDenis Revunov assert(GlobalMetadataStorage != MAP_FAILED,
16370cc19b56SDenis Revunov "__bolt_instr_setup: failed to mmap page for metadata!");
16380cc19b56SDenis Revunov
16390cc19b56SDenis Revunov GlobalAlloc = new (GlobalMetadataStorage) BumpPtrAllocator;
16400cc19b56SDenis Revunov // Conservatively reserve 100MiB
16410cc19b56SDenis Revunov GlobalAlloc->setMaxSize(0x6400000);
16420cc19b56SDenis Revunov GlobalAlloc->setShared(Shared);
16430cc19b56SDenis Revunov GlobalWriteProfileMutex = new (*GlobalAlloc, 0) Mutex();
164416a497c6SRafael Auler if (__bolt_instr_num_ind_calls > 0)
164516a497c6SRafael Auler GlobalIndCallCounters =
16460cc19b56SDenis Revunov new (*GlobalAlloc, 0) IndirectCallHashTable[__bolt_instr_num_ind_calls];
164716a497c6SRafael Auler
164816a497c6SRafael Auler if (__bolt_instr_sleep_time != 0) {
164976d346caSVladislav Khmelevsky     // Move the instrumented process to its own process group
165076d346caSVladislav Khmelevsky if (__bolt_instr_wait_forks)
165176d346caSVladislav Khmelevsky __setpgid(0, 0);
165276d346caSVladislav Khmelevsky
1653c7306cc2SAmir Ayupov if (long PID = __fork())
165416a497c6SRafael Auler return;
165516a497c6SRafael Auler watchProcess();
165616a497c6SRafael Auler }
165716a497c6SRafael Auler }
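
// The setup above replaces the pages backing the counter array with an
// anonymous mapping, shared when one profile must aggregate counts across
// fork()ed children (i.e. when no per-PID file is used). A stripped-down
// sketch with hypothetical names (the real code goes through its own __mmap
// wrapper and alignTo helper):
//
//   #include <cstdint>
//   #include <sys/mman.h>
//
//   void *remapCounters(uint64_t Start, uint64_t End, bool Shared) {
//     const int Flags =
//         MAP_ANONYMOUS | MAP_FIXED | (Shared ? MAP_SHARED : MAP_PRIVATE);
//     return mmap(reinterpret_cast<void *>(Start), End - Start,
//                 PROT_READ | PROT_WRITE, Flags, -1, 0);
//   }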
165816a497c6SRafael Auler
1659361f3b55SVladislav Khmelevsky extern "C" __attribute((force_align_arg_pointer)) void
1660361f3b55SVladislav Khmelevsky instrumentIndirectCall(uint64_t Target, uint64_t IndCallID) {
16610cc19b56SDenis Revunov GlobalIndCallCounters[IndCallID].incrementVal(Target, *GlobalAlloc);
166216a497c6SRafael Auler }
166316a497c6SRafael Auler
166416a497c6SRafael Auler /// We receive as in-stack arguments the identifier of the indirect call site
166516a497c6SRafael Auler /// as well as the target address for the call
166616a497c6SRafael Auler extern "C" __attribute((naked)) void __bolt_instr_indirect_call()
166716a497c6SRafael Auler {
166887e9c424SElvina Yakubova #if defined(__aarch64__)
166987e9c424SElvina Yakubova // clang-format off
167087e9c424SElvina Yakubova __asm__ __volatile__(SAVE_ALL
167187e9c424SElvina Yakubova "ldp x0, x1, [sp, #288]\n"
167287e9c424SElvina Yakubova "bl instrumentIndirectCall\n"
167387e9c424SElvina Yakubova RESTORE_ALL
167487e9c424SElvina Yakubova "ret\n"
167587e9c424SElvina Yakubova :::);
167687e9c424SElvina Yakubova // clang-format on
167787e9c424SElvina Yakubova #else
167887e9c424SElvina Yakubova // clang-format off
167916a497c6SRafael Auler __asm__ __volatile__(SAVE_ALL
1680361f3b55SVladislav Khmelevsky "mov 0xa0(%%rsp), %%rdi\n"
1681361f3b55SVladislav Khmelevsky "mov 0x98(%%rsp), %%rsi\n"
168216a497c6SRafael Auler "call instrumentIndirectCall\n"
168316a497c6SRafael Auler RESTORE_ALL
1684361f3b55SVladislav Khmelevsky "ret\n"
168516a497c6SRafael Auler :::);
168687e9c424SElvina Yakubova // clang-format on
168787e9c424SElvina Yakubova #endif
168816a497c6SRafael Auler }
168916a497c6SRafael Auler
169016a497c6SRafael Auler extern "C" __attribute((naked)) void __bolt_instr_indirect_tailcall()
169116a497c6SRafael Auler {
169287e9c424SElvina Yakubova #if defined(__aarch64__)
169387e9c424SElvina Yakubova // clang-format off
169487e9c424SElvina Yakubova __asm__ __volatile__(SAVE_ALL
169587e9c424SElvina Yakubova "ldp x0, x1, [sp, #288]\n"
169687e9c424SElvina Yakubova "bl instrumentIndirectCall\n"
169787e9c424SElvina Yakubova RESTORE_ALL
169887e9c424SElvina Yakubova "ret\n"
169987e9c424SElvina Yakubova :::);
170087e9c424SElvina Yakubova // clang-format on
170187e9c424SElvina Yakubova #else
170287e9c424SElvina Yakubova // clang-format off
170316a497c6SRafael Auler __asm__ __volatile__(SAVE_ALL
1704361f3b55SVladislav Khmelevsky "mov 0x98(%%rsp), %%rdi\n"
1705361f3b55SVladislav Khmelevsky "mov 0x90(%%rsp), %%rsi\n"
170616a497c6SRafael Auler "call instrumentIndirectCall\n"
170716a497c6SRafael Auler RESTORE_ALL
1708361f3b55SVladislav Khmelevsky "ret\n"
170916a497c6SRafael Auler :::);
171087e9c424SElvina Yakubova // clang-format on
171187e9c424SElvina Yakubova #endif
171216a497c6SRafael Auler }
171316a497c6SRafael Auler
171416a497c6SRafael Auler /// This is hooking the ELF entry point; it needs to save all machine state.
171516a497c6SRafael Auler extern "C" __attribute((naked)) void __bolt_instr_start()
171616a497c6SRafael Auler {
171787e9c424SElvina Yakubova #if defined(__aarch64__)
171887e9c424SElvina Yakubova // clang-format off
171987e9c424SElvina Yakubova __asm__ __volatile__(SAVE_ALL
172087e9c424SElvina Yakubova "bl __bolt_instr_setup\n"
172187e9c424SElvina Yakubova RESTORE_ALL
172287e9c424SElvina Yakubova "adrp x16, __bolt_start_trampoline\n"
172387e9c424SElvina Yakubova "add x16, x16, #:lo12:__bolt_start_trampoline\n"
172487e9c424SElvina Yakubova "br x16\n"
172587e9c424SElvina Yakubova :::);
172687e9c424SElvina Yakubova // clang-format on
172787e9c424SElvina Yakubova #else
172887e9c424SElvina Yakubova // clang-format off
172916a497c6SRafael Auler __asm__ __volatile__(SAVE_ALL
173016a497c6SRafael Auler "call __bolt_instr_setup\n"
173116a497c6SRafael Auler RESTORE_ALL
1732ad79d517SVasily Leonenko "jmp __bolt_start_trampoline\n"
173316a497c6SRafael Auler :::);
173487e9c424SElvina Yakubova // clang-format on
173587e9c424SElvina Yakubova #endif
173616a497c6SRafael Auler }
173716a497c6SRafael Auler
173816a497c6SRafael Auler /// This is hooking into ELF's DT_FINI
173916a497c6SRafael Auler extern "C" void __bolt_instr_fini() {
174087e9c424SElvina Yakubova #if defined(__aarch64__)
174187e9c424SElvina Yakubova // clang-format off
174287e9c424SElvina Yakubova __asm__ __volatile__(SAVE_ALL
174387e9c424SElvina Yakubova "adrp x16, __bolt_fini_trampoline\n"
174487e9c424SElvina Yakubova "add x16, x16, #:lo12:__bolt_fini_trampoline\n"
174587e9c424SElvina Yakubova "blr x16\n"
174687e9c424SElvina Yakubova RESTORE_ALL
174787e9c424SElvina Yakubova :::);
174887e9c424SElvina Yakubova // clang-format on
174987e9c424SElvina Yakubova #else
175087e9c424SElvina Yakubova __asm__ __volatile__("call __bolt_fini_trampoline\n" :::);
175187e9c424SElvina Yakubova #endif
1752a7992981SDenis Revunov if (__bolt_instr_sleep_time == 0) {
1753a7992981SDenis Revunov int FD = openProfile();
1754a7992981SDenis Revunov __bolt_instr_data_dump(FD);
1755a7992981SDenis Revunov __close(FD);
1756a7992981SDenis Revunov }
175716a497c6SRafael Auler DEBUG(report("Finished.\n"));
175862aa74f8SRafael Auler }
1759bbd9d610SAlexander Shaposhnikov
17603b876cc3SAlexander Shaposhnikov #endif
17613b876cc3SAlexander Shaposhnikov
17623b876cc3SAlexander Shaposhnikov #if defined(__APPLE__)
1763bbd9d610SAlexander Shaposhnikov
1764a0dd5b05SAlexander Shaposhnikov extern "C" void __bolt_instr_data_dump() {
1765a0dd5b05SAlexander Shaposhnikov ProfileWriterContext Ctx = readDescriptions();
1766a0dd5b05SAlexander Shaposhnikov
1767a0dd5b05SAlexander Shaposhnikov int FD = 2;
1768a0dd5b05SAlexander Shaposhnikov BumpPtrAllocator Alloc;
1769a0dd5b05SAlexander Shaposhnikov const uint8_t *FuncDesc = Ctx.FuncDescriptions;
1770a0dd5b05SAlexander Shaposhnikov uint32_t bolt_instr_num_funcs = _bolt_instr_num_funcs_getter();
1771a0dd5b05SAlexander Shaposhnikov
1772a0dd5b05SAlexander Shaposhnikov for (int I = 0, E = bolt_instr_num_funcs; I < E; ++I) {
1773a0dd5b05SAlexander Shaposhnikov FuncDesc = writeFunctionProfile(FD, Ctx, FuncDesc, Alloc);
1774a0dd5b05SAlexander Shaposhnikov Alloc.clear();
1775a0dd5b05SAlexander Shaposhnikov DEBUG(reportNumber("FuncDesc now: ", (uint64_t)FuncDesc, 16));
1776a0dd5b05SAlexander Shaposhnikov }
1777a0dd5b05SAlexander Shaposhnikov assert(FuncDesc == (void *)Ctx.Strings,
1778a0dd5b05SAlexander Shaposhnikov "FuncDesc ptr must be equal to stringtable");
1779a0dd5b05SAlexander Shaposhnikov }
1780a0dd5b05SAlexander Shaposhnikov
1781bbd9d610SAlexander Shaposhnikov // On OSX/iOS the final symbol name of an extern "C" function/variable contains
1782bbd9d610SAlexander Shaposhnikov // one extra leading underscore: _bolt_instr_setup -> __bolt_instr_setup.
17833b876cc3SAlexander Shaposhnikov extern "C"
17843b876cc3SAlexander Shaposhnikov __attribute__((section("__TEXT,__setup")))
17853b876cc3SAlexander Shaposhnikov __attribute__((force_align_arg_pointer))
17863b876cc3SAlexander Shaposhnikov void _bolt_instr_setup() {
1787a0dd5b05SAlexander Shaposhnikov __asm__ __volatile__(SAVE_ALL :::);
17883b876cc3SAlexander Shaposhnikov
1789a0dd5b05SAlexander Shaposhnikov report("Hello!\n");
17903b876cc3SAlexander Shaposhnikov
1791a0dd5b05SAlexander Shaposhnikov __asm__ __volatile__(RESTORE_ALL :::);
17921cf23e5eSAlexander Shaposhnikov }
1793bbd9d610SAlexander Shaposhnikov
17943b876cc3SAlexander Shaposhnikov extern "C"
17953b876cc3SAlexander Shaposhnikov __attribute__((section("__TEXT,__fini")))
17963b876cc3SAlexander Shaposhnikov __attribute__((force_align_arg_pointer))
17973b876cc3SAlexander Shaposhnikov void _bolt_instr_fini() {
1798a0dd5b05SAlexander Shaposhnikov report("Bye!\n");
1799a0dd5b05SAlexander Shaposhnikov __bolt_instr_data_dump();
1800e067f2adSAlexander Shaposhnikov }
1801e067f2adSAlexander Shaposhnikov
1802bbd9d610SAlexander Shaposhnikov #endif
1803