//===- bolt/runtime/instr.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// BOLT runtime instrumentation library for x86 Linux. Currently, BOLT does
// not support linking modules with dependencies on one another into the final
// binary (TODO?), which means this library has to be self-contained in a single
// module.
//
// All extern declarations here need to be defined by BOLT itself. Those will be
// undefined symbols that BOLT needs to resolve by emitting these symbols with
// MCStreamer. Currently, Passes/Instrumentation.cpp is the pass responsible
// for defining the symbols here and these two files have a tight coupling: one
// working statically when you run BOLT and another during program runtime when
// you run an instrumented binary. The main goal here is to output an fdata file
// (BOLT profile) with the instrumentation counters inserted by the static pass.
// Counters for indirect calls are an exception, as we can't know them
// statically. These counters are created and managed here. To allow this, we
// need a minimal framework for allocating memory dynamically. We provide this
// with the BumpPtrAllocator class (not LLVM's, but our own version of it).
//
// Since this code is intended to be inserted into any executable, we decided to
// make it standalone and do not depend on any external libraries (i.e. language
// support libraries, such as glibc or stdc++). To allow this, we provide a few
// light implementations of common OS interacting functionalities using direct
// syscall wrappers. Our simple allocator doesn't manage deallocations that
// fragment the memory space, so it's stack based. This is the minimal framework
// provided here to allow processing instrumented counters and writing fdata.
//
// In the C++ idiom used here, we never use or rely on constructors or
// destructors for global objects. That's because those need support from the
// linker in initialization/finalization code, and we want to keep our linker
// very simple. Similarly, we don't create any global objects that are zero
// initialized, since those would need to go to .bss, which our simple linker
// also doesn't support (TODO?).
//
//===----------------------------------------------------------------------===//

#if defined (__x86_64__)
#include "common.h"

// Enables a very verbose logging to stderr useful when debugging
//#define ENABLE_DEBUG

#ifdef ENABLE_DEBUG
#define DEBUG(X)                                                               \
  { X; }
#else
#define DEBUG(X)                                                               \
  {}
#endif

// Everything in this translation unit is hidden-visibility: these symbols are
// injected into arbitrary binaries and must not leak into their dynamic
// symbol tables.
#pragma GCC visibility push(hidden)

extern "C" {

#if defined(__APPLE__)
// On Mach-O we cannot reference the instrumentation data directly; BOLT emits
// getter trampolines instead.
extern uint64_t* _bolt_instr_locations_getter();
extern uint32_t _bolt_num_counters_getter();

extern uint8_t* _bolt_instr_tables_getter();
extern uint32_t _bolt_instr_num_funcs_getter();

#else

// Main counters inserted by instrumentation, incremented during runtime when
// points of interest (locations) in the program are reached. Those are direct
// calls and direct and indirect branches (local ones). There are also counters
// for basic block execution if they are a spanning tree leaf and need to be
// counted in order to infer the execution count of other edges of the CFG.
extern uint64_t __bolt_instr_locations[];
extern uint32_t __bolt_num_counters;
// Descriptions are serialized metadata about binary functions written by BOLT,
// so we have a minimal understanding about the program structure. For a
// reference on the exact format of this metadata, see *Description structs,
// Location, InstrumentedNode and EntryNode.
// Number of indirect call site descriptions
extern uint32_t __bolt_instr_num_ind_calls;
// Number of indirect call target descriptions
extern uint32_t __bolt_instr_num_ind_targets;
// Number of function descriptions
extern uint32_t __bolt_instr_num_funcs;
// Time to sleep across dumps (when we write the fdata profile to disk)
extern uint32_t __bolt_instr_sleep_time;
// Do not clear counters across dumps, rewrite file with the updated values
extern bool __bolt_instr_no_counters_clear;
// Wait until all forks of instrumented process will finish
extern bool __bolt_instr_wait_forks;
// Filename to dump data to
extern char __bolt_instr_filename[];
// Instrumented binary file path
extern char __bolt_instr_binpath[];
// If true, append current PID to the fdata filename when creating it so
// different invocations of the same program can be differentiated.
extern bool __bolt_instr_use_pid;
// Functions that will be used to instrument indirect calls.
// BOLT static pass will identify indirect calls and modify them to load the
// address in these trampolines and call this address instead. BOLT can't use
// direct calls to our handlers because our addresses here are not known at
// analysis time. We only support resolving dependencies from this file to the
// output of BOLT, *not* the other way around.
// TODO: We need better linking support to make that happen.
extern void (*__bolt_ind_call_counter_func_pointer)();
extern void (*__bolt_ind_tailcall_counter_func_pointer)();
// Function pointers to init/fini trampoline routines in the binary, so we can
// resume regular execution of these functions that we hooked
extern void __bolt_start_trampoline();
extern void __bolt_fini_trampoline();

#endif
}

namespace {

/// A simple allocator that mmaps a fixed size region and manages this space
/// in a stack fashion, meaning you always deallocate the last element that
/// was allocated. In practice, we don't need to deallocate individual elements.
/// We monotonically increase our usage and then deallocate everything once we
/// are done processing something.
class BumpPtrAllocator {
  /// This is written before each allocation and acts as a canary to detect
  /// when a bug caused our program to cross allocation boundaries.
  struct EntryMetadata {
    uint64_t Magic;     // Must match BumpPtrAllocator::Magic, or memory is
                        // corrupt (checked in the debug deallocate path)
    uint64_t AllocSize; // Aligned size of this allocation, header included
  };

public:
  /// Return a pointer to \p Size usable bytes. The region backing the stack
  /// is mmapped lazily on the first call; \p Size is padded by the metadata
  /// header and rounded up to a 16-byte multiple before being consumed.
  void *allocate(size_t Size) {
    Lock L(M);

    if (StackBase == nullptr) {
      // Lazy reservation so setMaxSize()/setShared() can still take effect
      // before first use.
      StackBase = reinterpret_cast<uint8_t *>(
          __mmap(0, MaxSize, PROT_READ | PROT_WRITE,
                 (Shared ? MAP_SHARED : MAP_PRIVATE) | MAP_ANONYMOUS, -1, 0));
      assert(StackBase != MAP_FAILED,
             "BumpPtrAllocator: failed to mmap stack!");
      StackSize = 0;
    }

    Size = alignTo(Size + sizeof(EntryMetadata), 16);
    uint8_t *AllocAddress = StackBase + StackSize + sizeof(EntryMetadata);
    // Write the canary header immediately before the returned region.
    auto *M = reinterpret_cast<EntryMetadata *>(StackBase + StackSize);
    M->Magic = Magic;
    M->AllocSize = Size;
    StackSize += Size;
    assert(StackSize < MaxSize, "allocator ran out of memory");
    return AllocAddress;
  }

#ifdef DEBUG
  /// Element-wise deallocation is only used for debugging to catch memory
  /// bugs by checking magic bytes. Ordinarily, we reset the allocator once
  /// we are done with it. Reset is done with clear(). There's no need
  /// to deallocate each element individually.
  void deallocate(void *Ptr) {
    Lock L(M);
    uint8_t MetadataOffset = sizeof(EntryMetadata);
    auto *M = reinterpret_cast<EntryMetadata *>(
        reinterpret_cast<uint8_t *>(Ptr) - MetadataOffset);
    const uint8_t *StackTop = StackBase + StackSize + MetadataOffset;
    // Validate size
    if (Ptr != StackTop - M->AllocSize) {
      // Failed validation, check if it is a pointer returned by operator new []
      // (those carry an extra element-count cookie before the user pointer).
      MetadataOffset +=
          sizeof(uint64_t); // Space for number of elements alloc'ed
      M = reinterpret_cast<EntryMetadata *>(reinterpret_cast<uint8_t *>(Ptr) -
                                            MetadataOffset);
      // Ok, it failed both checks if this assertion fails. Stop the program, we
      // have a memory bug.
      assert(Ptr == StackTop - M->AllocSize,
             "must deallocate the last element alloc'ed");
    }
    assert(M->Magic == Magic, "allocator magic is corrupt");
    StackSize -= M->AllocSize;
  }
#else
  void deallocate(void *) {}
#endif

  /// Discard all allocations at once; the mmapped region stays reserved.
  void clear() {
    Lock L(M);
    StackSize = 0;
  }

  /// Set mmap reservation size (only relevant before first allocation)
  void setMaxSize(uint64_t Size) { MaxSize = Size; }

  /// Set mmap reservation privacy (only relevant before first allocation)
  void setShared(bool S) { Shared = S; }

  /// Release the backing mapping. No-op if allocate() was never called.
  void destroy() {
    if (StackBase == nullptr)
      return;
    __munmap(StackBase, MaxSize);
  }

  // Placement operator to construct allocator in possibly shared mmaped memory
  static void *operator new(size_t, void *Ptr) { return Ptr; };

private:
  static constexpr uint64_t Magic = 0x1122334455667788ull;
  // Default reservation: 10 MiB.
  uint64_t MaxSize = 0xa00000;
  uint8_t *StackBase{nullptr};
  uint64_t StackSize{0};
  bool Shared{false};
  Mutex M;
};

/// Used for allocating indirect call instrumentation counters.
/// Initialized by __bolt_instr_setup, our initialization routine.
BumpPtrAllocator *GlobalAlloc;

// Base address which we subtract from recorded PC values when searching for
// indirect call description entries. Needed because indCall descriptions are
// mapped read-only and contain static addresses. Initialized in
// __bolt_instr_setup.
uint64_t TextBaseAddress = 0;

// Storage for GlobalAlloc which can be shared if not using
// instrumentation-file-append-pid.
void *GlobalMetadataStorage;

} // anonymous namespace

// User-defined placement new operators. We only use those (as opposed to
// overriding the regular operator new) so we can keep our allocator in the
// stack instead of in a data section (global).
void *operator new(size_t Sz, BumpPtrAllocator &A) { return A.allocate(Sz); }
void *operator new(size_t Sz, BumpPtrAllocator &A, char C) {
  // Fill-constructing variant: byte-fills the allocation with C.
  auto *Ptr = reinterpret_cast<char *>(A.allocate(Sz));
  memset(Ptr, C, Sz);
  return Ptr;
}
void *operator new[](size_t Sz, BumpPtrAllocator &A) {
  return A.allocate(Sz);
}
void *operator new[](size_t Sz, BumpPtrAllocator &A, char C) {
  auto *Ptr = reinterpret_cast<char *>(A.allocate(Sz));
  memset(Ptr, C, Sz);
  return Ptr;
}
// Only called during exception unwinding (useless). We must manually dealloc.
// C++ language weirdness
void operator delete(void *Ptr, BumpPtrAllocator &A) { A.deallocate(Ptr); }

namespace {

// Disable instrumentation optimizations that sacrifice profile accuracy
extern "C" bool __bolt_instr_conservative;

/// Basic key-val atom stored in our hash
struct SimpleHashTableEntryBase {
  uint64_t Key;
  uint64_t Val;
  /// Write "<pid>: <Msg>0x<this>: MapEntry(0x<Key>, 0x<Val>)" to stderr.
  void dump(const char *Msg = nullptr) {
    // TODO: make some sort of formatting function
    // Currently we have to do it the ugly way because
    // we want every message to be printed atomically via a single call to
    // __write. If we use reportNumber() and others multiple times, we'll get
    // garbage in a multithreaded environment
    char Buf[BufSize];
    char *Ptr = Buf;
    Ptr = intToStr(Ptr, __getpid(), 10);
    *Ptr++ = ':';
    *Ptr++ = ' ';
    if (Msg)
      Ptr = strCopy(Ptr, Msg, strLen(Msg));
    *Ptr++ = '0';
    *Ptr++ = 'x';
    Ptr = intToStr(Ptr, (uint64_t)this, 16);
    *Ptr++ = ':';
    *Ptr++ = ' ';
    Ptr = strCopy(Ptr, "MapEntry(0x", sizeof("MapEntry(0x") - 1);
    Ptr = intToStr(Ptr, Key, 16);
    *Ptr++ = ',';
    *Ptr++ = ' ';
    *Ptr++ = '0';
    *Ptr++ = 'x';
    Ptr = intToStr(Ptr, Val, 16);
    *Ptr++ = ')';
    *Ptr++ = '\n';
    assert(Ptr - Buf < BufSize, "Buffer overflow!");
    // print everything all at once for atomicity
    __write(2, Buf, Ptr - Buf);
  }
};

/// This hash table implementation starts by allocating a table of size
/// InitialSize. When conflicts happen in this main table, it resolves
/// them by chaining a new table of size IncSize. It never reallocs as our
/// allocator doesn't support it. The key is intended to be function pointers.
/// There's no clever hash function (it's just x mod size, size being prime).
/// I never tuned the coefficients in the modular equation (TODO)
/// This is used for indirect calls (each call site has one of this, so it
/// should have a small footprint) and for tallying call counts globally for
/// each target to check if we missed the origin of some calls (this one is a
/// large instantiation of this template, since it is global for all call sites)
template <typename T = SimpleHashTableEntryBase, uint32_t InitialSize = 7,
          uint32_t IncSize = 7>
class SimpleHashTable {
public:
  using MapEntry = T;

  /// Increment by 1 the value of \p Key. If it is not in this table, it will be
  /// added to the table and its value set to 1.
  void incrementVal(uint64_t Key, BumpPtrAllocator &Alloc) {
    if (!__bolt_instr_conservative) {
      // Fast path: if the table is contended, drop this sample rather than
      // block the instrumented program.
      TryLock L(M);
      if (!L.isLocked())
        return;
      auto &E = getOrAllocEntry(Key, Alloc);
      ++E.Val;
      return;
    }
    Lock L(M);
    auto &E = getOrAllocEntry(Key, Alloc);
    ++E.Val;
  }

  /// Basic member accessing interface. Here we pass the allocator explicitly to
  /// avoid storing a pointer to it as part of this table (remember there is one
  /// hash for each indirect call site, so we want to minimize our footprint).
  MapEntry &get(uint64_t Key, BumpPtrAllocator &Alloc) {
    if (!__bolt_instr_conservative) {
      TryLock L(M);
      if (!L.isLocked())
        return NoEntry; // Contended: hand back the dummy entry instead.
      return getOrAllocEntry(Key, Alloc);
    }
    Lock L(M);
    return getOrAllocEntry(Key, Alloc);
  }

  /// Traverses all elements in the table
  template <typename... Args>
  void forEachElement(void (*Callback)(MapEntry &, Args...), Args... args) {
    Lock L(M);
    if (!TableRoot)
      return;
    return forEachElement(Callback, InitialSize, TableRoot, args...);
  }

  void resetCounters();

private:
  // Key == 0 marks an unused slot; the top bit of Key marks a slot whose
  // payload is actually a pointer to a chained next-level table.
  constexpr static uint64_t VacantMarker = 0;
  constexpr static uint64_t FollowUpTableMarker = 0x8000000000000000ull;

  MapEntry *TableRoot{nullptr};
  MapEntry NoEntry;
  Mutex M;

  /// Recursively visit every real entry, descending into chained tables.
  template <typename... Args>
  void forEachElement(void (*Callback)(MapEntry &, Args...),
                      uint32_t NumEntries, MapEntry *Entries, Args... args) {
    for (uint32_t I = 0; I < NumEntries; ++I) {
      MapEntry &Entry = Entries[I];
      if (Entry.Key == VacantMarker)
        continue;
      if (Entry.Key & FollowUpTableMarker) {
        MapEntry *Next =
            reinterpret_cast<MapEntry *>(Entry.Key & ~FollowUpTableMarker);
        assert(Next != Entries, "Circular reference!");
        forEachElement(Callback, IncSize, Next, args...);
        continue;
      }
      Callback(Entry, args...);
    }
  }

  /// Allocate the root table and place the first entry into it.
  MapEntry &firstAllocation(uint64_t Key, BumpPtrAllocator &Alloc) {
    TableRoot = new (Alloc, 0) MapEntry[InitialSize];
    MapEntry &Entry = TableRoot[Key % InitialSize];
    Entry.Key = Key;
    // DEBUG(Entry.dump("Created root entry: "));
    return Entry;
  }

  /// Find or insert \p Key. \p Selector is the portion of the key not yet
  /// consumed by upper levels; each level takes Selector mod its table size
  /// and passes the quotient down on conflict.
  MapEntry &getEntry(MapEntry *Entries, uint64_t Key, uint64_t Selector,
                     BumpPtrAllocator &Alloc, int CurLevel) {
    // DEBUG(reportNumber("getEntry called, level ", CurLevel, 10));
    const uint32_t NumEntries = CurLevel == 0 ? InitialSize : IncSize;
    uint64_t Remainder = Selector / NumEntries;
    Selector = Selector % NumEntries;
    MapEntry &Entry = Entries[Selector];

    // A hit
    if (Entry.Key == Key) {
      // DEBUG(Entry.dump("Hit: "));
      return Entry;
    }

    // Vacant - add new entry
    if (Entry.Key == VacantMarker) {
      Entry.Key = Key;
      // DEBUG(Entry.dump("Adding new entry: "));
      return Entry;
    }

    // Defer to the next level
    if (Entry.Key & FollowUpTableMarker) {
      return getEntry(
          reinterpret_cast<MapEntry *>(Entry.Key & ~FollowUpTableMarker),
          Key, Remainder, Alloc, CurLevel + 1);
    }

    // Conflict - create the next level
    // DEBUG(Entry.dump("Creating new level: "));

    MapEntry *NextLevelTbl = new (Alloc, 0) MapEntry[IncSize];
    // DEBUG(
    //     reportNumber("Newly allocated level: 0x", uint64_t(NextLevelTbl),
    //     16));
    // Recompute where the displaced resident entry lands in the new level by
    // replaying the selector division for every level above it.
    uint64_t CurEntrySelector = Entry.Key / InitialSize;
    for (int I = 0; I < CurLevel; ++I)
      CurEntrySelector /= IncSize;
    CurEntrySelector = CurEntrySelector % IncSize;
    NextLevelTbl[CurEntrySelector] = Entry;
    Entry.Key = reinterpret_cast<uint64_t>(NextLevelTbl) | FollowUpTableMarker;
    assert((NextLevelTbl[CurEntrySelector].Key & ~FollowUpTableMarker) !=
               uint64_t(Entries),
           "circular reference created!\n");
    // DEBUG(NextLevelTbl[CurEntrySelector].dump("New level entry: "));
    // DEBUG(Entry.dump("Updated old entry: "));
    return getEntry(NextLevelTbl, Key, Remainder, Alloc, CurLevel + 1);
  }

  MapEntry &getOrAllocEntry(uint64_t Key, BumpPtrAllocator &Alloc) {
    if (TableRoot) {
      MapEntry &E = getEntry(TableRoot, Key, Key, Alloc, 0);
      assert(!(E.Key & FollowUpTableMarker), "Invalid entry!");
      return E;
    }
    return firstAllocation(Key, Alloc);
  }
};

template <typename T> void resetIndCallCounter(T &Entry) {
  Entry.Val = 0;
}

template <typename T, uint32_t X, uint32_t Y>
void SimpleHashTable<T, X, Y>::resetCounters() {
  forEachElement(resetIndCallCounter);
}

/// Represents a hash table mapping a function target address to its counter.
using IndirectCallHashTable = SimpleHashTable<>;

/// Initialize with number 1 instead of 0 so we don't go into .bss. This is the
/// global array of all hash tables storing indirect call destinations happening
/// during runtime, one table per call site.
45816a497c6SRafael Auler IndirectCallHashTable *GlobalIndCallCounters{ 45916a497c6SRafael Auler reinterpret_cast<IndirectCallHashTable *>(1)}; 46016a497c6SRafael Auler 46116a497c6SRafael Auler /// Don't allow reentrancy in the fdata writing phase - only one thread writes 46216a497c6SRafael Auler /// it 46316a497c6SRafael Auler Mutex *GlobalWriteProfileMutex{reinterpret_cast<Mutex *>(1)}; 46416a497c6SRafael Auler 46516a497c6SRafael Auler /// Store number of calls in additional to target address (Key) and frequency 46616a497c6SRafael Auler /// as perceived by the basic block counter (Val). 46716a497c6SRafael Auler struct CallFlowEntryBase : public SimpleHashTableEntryBase { 46816a497c6SRafael Auler uint64_t Calls; 46916a497c6SRafael Auler }; 47016a497c6SRafael Auler 47116a497c6SRafael Auler using CallFlowHashTableBase = SimpleHashTable<CallFlowEntryBase, 11939, 233>; 47216a497c6SRafael Auler 47316a497c6SRafael Auler /// This is a large table indexing all possible call targets (indirect and 47416a497c6SRafael Auler /// direct ones). The goal is to find mismatches between number of calls (for 47516a497c6SRafael Auler /// those calls we were able to track) and the entry basic block counter of the 47616a497c6SRafael Auler /// callee. In most cases, these two should be equal. If not, there are two 47716a497c6SRafael Auler /// possible scenarios here: 47816a497c6SRafael Auler /// 47916a497c6SRafael Auler /// * Entry BB has higher frequency than all known calls to this function. 48016a497c6SRafael Auler /// In this case, we have dynamic library code or any uninstrumented code 48116a497c6SRafael Auler /// calling this function. We will write the profile for these untracked 48216a497c6SRafael Auler /// calls as having source "0 [unknown] 0" in the fdata file. 
///
/// * Number of known calls is higher than the frequency of entry BB
///   This only happens when there is no counter for the entry BB / callee
///   function is not simple (in BOLT terms). We don't do anything special
///   here and just ignore those (we still report all calls to the non-simple
///   function, though).
///
class CallFlowHashTable : public CallFlowHashTableBase {
public:
  CallFlowHashTable(BumpPtrAllocator &Alloc) : Alloc(Alloc) {}

  /// Fetch (or create) the entry keyed by \p Key, forwarding our stored
  /// allocator to the base implementation.
  MapEntry &get(uint64_t Key) { return CallFlowHashTableBase::get(Key, Alloc); }

private:
  // Different than the hash table for indirect call targets, we do store the
  // allocator here since there is only one call flow hash and space overhead
  // is negligible.
  BumpPtrAllocator &Alloc;
};

///
/// Description metadata emitted by BOLT to describe the program - refer to
/// Passes/Instrumentation.cpp - Instrumentation::emitTablesAsELFNote()
///
/// A code location: a function identified by an offset into the string table
/// (see ProfileWriterContext::Strings) plus a byte offset inside it.
struct Location {
  uint32_t FunctionName; // Byte offset of the name in the string table
  uint32_t Offset;       // Offset within the function
};

/// Static description of one direct call site.
struct CallDescription {
  Location From;          // Call site
  uint32_t FromNode;      // CFG node (basic block) containing the call
  Location To;            // Callee
  uint32_t Counter;       // Counter index; 0xffffffff means no own counter
                          // (frequency is taken from the containing BB)
  uint64_t TargetAddress; // Callee address, used for call-flow accounting
};

using IndCallDescription = Location;

/// A possible target of an indirect call, resolved at profile-write time.
struct IndCallTargetDescription {
  Location Loc;
  uint64_t Address;
};

/// Static description of one CFG edge.
struct EdgeDescription {
  Location From;
  uint32_t FromNode;
  Location To;
  uint32_t ToNode;
  uint32_t Counter; // 0xffffffff marks a spanning-tree edge with no counter;
                    // its frequency is inferred later (see Graph)
};

/// A node whose frequency is profiled directly (spanning-tree leaf).
struct InstrumentedNode {
  uint32_t Node;
  uint32_t Counter;
};

/// Maps a CFG entry node to the runtime address of that entry point.
struct EntryNode {
  uint64_t Node;
  uint64_t Address;
};

/// In-memory view of one function's serialized metadata. The pointers below
/// point directly into the mmapped metadata blob; nothing is copied.
struct FunctionDescription {
  uint32_t NumLeafNodes;
  const InstrumentedNode *LeafNodes;
  uint32_t NumEdges;
  const EdgeDescription *Edges;
  uint32_t NumCalls;
  const CallDescription *Calls;
  uint32_t NumEntryNodes;
  const EntryNode *EntryNodes;

  /// Constructor will parse the serialized function metadata written by BOLT
  FunctionDescription(const uint8_t *FuncDesc);

  /// Total serialized size: 16 bytes for the four uint32_t count fields plus
  /// the four trailing arrays (matches the offsets used by the constructor).
  uint64_t getSize() const {
    return 16 + NumLeafNodes * sizeof(InstrumentedNode) +
           NumEdges * sizeof(EdgeDescription) +
           NumCalls * sizeof(CallDescription) +
           NumEntryNodes * sizeof(EntryNode);
  }
};

/// The context is created when the fdata profile needs to be written to disk
/// and we need to interpret our runtime counters. It contains pointers to the
/// mmaped binary (only the BOLT written metadata section). Deserialization
/// should be straightforward as most data is POD or an array of POD elements.
/// This metadata is used to reconstruct function CFGs.
struct ProfileWriterContext {
  IndCallDescription *IndCallDescriptions; // Indirect call sites
  IndCallTargetDescription *IndCallTargets; // Possible indirect call targets
  uint8_t *FuncDescriptions; // Serialized per-function metadata blobs
  char *Strings;  // String table with function names used in this binary
  int FileDesc;   // File descriptor for the file on disk backing this
                  // information in memory via mmap
  void *MMapPtr;  // The mmap ptr
  int MMapSize;   // The mmap size

  /// Hash table storing all possible call destinations to detect untracked
  /// calls and correctly report them as [unknown] in output fdata.
  CallFlowHashTable *CallFlowTable;

  /// Lookup the sorted indirect call target vector to fetch function name and
  /// offset for an arbitrary function pointer.
  const IndCallTargetDescription *lookupIndCallTarget(uint64_t Target) const;
};

/// Perform a string comparison and returns zero if Str1 matches Str2. Compares
/// at most Size characters.
592cc4b2fb6SRafael Auler int compareStr(const char *Str1, const char *Str2, int Size) { 593821480d2SRafael Auler while (*Str1 == *Str2) { 594821480d2SRafael Auler if (*Str1 == '\0' || --Size == 0) 595821480d2SRafael Auler return 0; 596821480d2SRafael Auler ++Str1; 597821480d2SRafael Auler ++Str2; 598821480d2SRafael Auler } 599821480d2SRafael Auler return 1; 600821480d2SRafael Auler } 601821480d2SRafael Auler 60216a497c6SRafael Auler /// Output Location to the fdata file 60316a497c6SRafael Auler char *serializeLoc(const ProfileWriterContext &Ctx, char *OutBuf, 604cc4b2fb6SRafael Auler const Location Loc, uint32_t BufSize) { 605821480d2SRafael Auler // fdata location format: Type Name Offset 606821480d2SRafael Auler // Type 1 - regular symbol 607821480d2SRafael Auler OutBuf = strCopy(OutBuf, "1 "); 60816a497c6SRafael Auler const char *Str = Ctx.Strings + Loc.FunctionName; 609cc4b2fb6SRafael Auler uint32_t Size = 25; 61062aa74f8SRafael Auler while (*Str) { 61162aa74f8SRafael Auler *OutBuf++ = *Str++; 612cc4b2fb6SRafael Auler if (++Size >= BufSize) 613cc4b2fb6SRafael Auler break; 61462aa74f8SRafael Auler } 615cc4b2fb6SRafael Auler assert(!*Str, "buffer overflow, function name too large"); 61662aa74f8SRafael Auler *OutBuf++ = ' '; 617821480d2SRafael Auler OutBuf = intToStr(OutBuf, Loc.Offset, 16); 61862aa74f8SRafael Auler *OutBuf++ = ' '; 61962aa74f8SRafael Auler return OutBuf; 62062aa74f8SRafael Auler } 62162aa74f8SRafael Auler 62216a497c6SRafael Auler /// Read and deserialize a function description written by BOLT. \p FuncDesc 62316a497c6SRafael Auler /// points at the beginning of the function metadata structure in the file. 
/// See Instrumentation::emitTablesAsELFNote()
FunctionDescription::FunctionDescription(const uint8_t *FuncDesc) {
  // Serialized layout (offsets tracked cumulatively below):
  //   [u32 NumLeafNodes][LeafNodes...][u32 NumEdges][Edges...]
  //   [u32 NumCalls][Calls...][u32 NumEntryNodes][EntryNodes...]
  // Arrays are not copied; pointers alias the mmapped blob.
  NumLeafNodes = *reinterpret_cast<const uint32_t *>(FuncDesc);
  DEBUG(reportNumber("NumLeafNodes = ", NumLeafNodes, 10));
  LeafNodes = reinterpret_cast<const InstrumentedNode *>(FuncDesc + 4);

  NumEdges = *reinterpret_cast<const uint32_t *>(
      FuncDesc + 4 + NumLeafNodes * sizeof(InstrumentedNode));
  DEBUG(reportNumber("NumEdges = ", NumEdges, 10));
  Edges = reinterpret_cast<const EdgeDescription *>(
      FuncDesc + 8 + NumLeafNodes * sizeof(InstrumentedNode));

  NumCalls = *reinterpret_cast<const uint32_t *>(
      FuncDesc + 8 + NumLeafNodes * sizeof(InstrumentedNode) +
      NumEdges * sizeof(EdgeDescription));
  DEBUG(reportNumber("NumCalls = ", NumCalls, 10));
  Calls = reinterpret_cast<const CallDescription *>(
      FuncDesc + 12 + NumLeafNodes * sizeof(InstrumentedNode) +
      NumEdges * sizeof(EdgeDescription));
  NumEntryNodes = *reinterpret_cast<const uint32_t *>(
      FuncDesc + 12 + NumLeafNodes * sizeof(InstrumentedNode) +
      NumEdges * sizeof(EdgeDescription) + NumCalls * sizeof(CallDescription));
  DEBUG(reportNumber("NumEntryNodes = ", NumEntryNodes, 10));
  EntryNodes = reinterpret_cast<const EntryNode *>(
      FuncDesc + 16 + NumLeafNodes * sizeof(InstrumentedNode) +
      NumEdges * sizeof(EdgeDescription) + NumCalls * sizeof(CallDescription));
}

/// Read and mmap descriptions written by BOLT from the executable's notes
/// section
#if defined(HAVE_ELF_H) and !defined(__APPLE__)

/// Return the caller's program counter; noinline so the return address of
/// this call is a meaningful address inside the running text segment.
void *__attribute__((noinline)) __get_pc() {
  return __builtin_extract_return_addr(__builtin_return_address(0));
}

/// Get string with address and parse it to hex pair <StartAddress, EndAddress>
bool parseAddressRange(const char *Str, uint64_t &StartAddress,
                       uint64_t &EndAddress) {
  if (!Str)
    return false;
  // Parsed string format: <hex1>-<hex2>
  StartAddress = hexToLong(Str, '-');
  while (*Str && *Str != '-')
    ++Str;
  if (!*Str)
    return false;
  ++Str; // swallow '-'
  EndAddress = hexToLong(Str);
  return true;
}

/// Get full path to the real binary by getting current virtual address
/// and searching for the appropriate link in address range in
/// /proc/self/map_files
static char *getBinaryPath() {
  const uint32_t BufSize = 1024;
  const uint32_t NameMax = 4096;
  const char DirPath[] = "/proc/self/map_files/";
  // Cached across calls: resolved at most once per process.
  static char TargetPath[NameMax] = {};
  char Buf[BufSize];

  // An explicit path injected by BOLT takes precedence over discovery.
  if (__bolt_instr_binpath[0] != '\0')
    return __bolt_instr_binpath;

  if (TargetPath[0] != '\0')
    return TargetPath;

  unsigned long CurAddr = (unsigned long)__get_pc();
  uint64_t FDdir = __open(DirPath, O_RDONLY,
                          /*mode=*/0666);
  assert(static_cast<int64_t>(FDdir) >= 0,
         "failed to open /proc/self/map_files");

  // Each entry in map_files is named "<start>-<end>" and links to the mapped
  // file; find the mapping that contains our own PC.
  while (long Nread = __getdents(FDdir, (struct dirent *)Buf, BufSize)) {
    assert(static_cast<int64_t>(Nread) != -1, "failed to get folder entries");

    struct dirent *d;
    for (long Bpos = 0; Bpos < Nread; Bpos += d->d_reclen) {
      d = (struct dirent *)(Buf + Bpos);

      uint64_t StartAddress, EndAddress;
      if (!parseAddressRange(d->d_name, StartAddress, EndAddress))
        continue;
      if (CurAddr < StartAddress || CurAddr > EndAddress)
        continue;
      char FindBuf[NameMax];
      char *C = strCopy(FindBuf, DirPath, NameMax);
      C = strCopy(C, d->d_name, NameMax - (C - FindBuf));
      *C = '\0';
      // NOTE(review): Ret is uint32_t; `Ret != -1` relies on the usual
      // unsigned conversion of -1, which works but reads oddly — the check
      // against BufSize (not NameMax) also looks suspicious; confirm intent.
      uint32_t Ret = __readlink(FindBuf, TargetPath, sizeof(TargetPath));
      assert(Ret != -1 && Ret != BufSize, "readlink error");
      TargetPath[Ret] = '\0';
      return TargetPath;
    }
  }
  return nullptr;
}

/// Locate and mmap our own binary, then find the .bolt.instr.tables section
/// and populate a ProfileWriterContext with pointers into its ELF note.
ProfileWriterContext readDescriptions() {
  ProfileWriterContext Result;
  char *BinPath = getBinaryPath();
  assert(BinPath && BinPath[0] != '\0', "failed to find binary path");

  uint64_t FD = __open(BinPath, O_RDONLY,
                       /*mode=*/0666);
  assert(static_cast<int64_t>(FD) >= 0, "failed to open binary path");

  Result.FileDesc = FD;

  // mmap our binary to memory
  uint64_t Size = __lseek(FD, 0, SEEK_END);
  uint8_t *BinContents = reinterpret_cast<uint8_t *>(
      __mmap(0, Size, PROT_READ, MAP_PRIVATE, FD, 0));
  assert(BinContents != MAP_FAILED, "readDescriptions: Failed to mmap self!");
  Result.MMapPtr = BinContents;
  Result.MMapSize = Size;
  Elf64_Ehdr *Hdr = reinterpret_cast<Elf64_Ehdr *>(BinContents);
  Elf64_Shdr *Shdr = reinterpret_cast<Elf64_Shdr *>(BinContents + Hdr->e_shoff);
  Elf64_Shdr *StringTblHeader = reinterpret_cast<Elf64_Shdr *>(
      BinContents + Hdr->e_shoff + Hdr->e_shstrndx * Hdr->e_shentsize);

  // Find .bolt.instr.tables with the data we need and set pointers to it
  for (int I = 0; I < Hdr->e_shnum; ++I) {
    char *SecName = reinterpret_cast<char *>(
        BinContents + StringTblHeader->sh_offset + Shdr->sh_name);
    if (compareStr(SecName, ".bolt.instr.tables", 64) != 0) {
      // Not our section: advance Shdr to the next section header.
      Shdr = reinterpret_cast<Elf64_Shdr *>(BinContents + Hdr->e_shoff +
                                            (I + 1) * Hdr->e_shentsize);
      continue;
    }
    // Actual contents of the ELF note start after offset 20 decimal:
    // Offset 0: Producer name size (4 bytes)
    // Offset 4: Contents size (4 bytes)
    // Offset 8: Note type (4 bytes)
    // Offset 12: Producer name (BOLT\0) (5 bytes + align to 4-byte boundary)
    // Offset 20: Contents
    // Contents layout: [u32 size][IndCallDescriptions...]
    //                  [u32 size][IndCallTargets...]
    //                  [u32 size][FuncDescriptions...][Strings]
    uint32_t IndCallDescSize =
        *reinterpret_cast<uint32_t *>(BinContents + Shdr->sh_offset + 20);
    uint32_t IndCallTargetDescSize = *reinterpret_cast<uint32_t *>(
        BinContents + Shdr->sh_offset + 24 + IndCallDescSize);
    uint32_t FuncDescSize =
        *reinterpret_cast<uint32_t *>(BinContents + Shdr->sh_offset + 28 +
                                      IndCallDescSize + IndCallTargetDescSize);
    Result.IndCallDescriptions = reinterpret_cast<IndCallDescription *>(
        BinContents + Shdr->sh_offset + 24);
    Result.IndCallTargets = reinterpret_cast<IndCallTargetDescription *>(
        BinContents + Shdr->sh_offset + 28 + IndCallDescSize);
    Result.FuncDescriptions = BinContents + Shdr->sh_offset + 32 +
                              IndCallDescSize + IndCallTargetDescSize;
    Result.Strings = reinterpret_cast<char *>(
        BinContents + Shdr->sh_offset + 32 + IndCallDescSize +
        IndCallTargetDescSize + FuncDescSize);
    return Result;
  }
  const char ErrMsg[] =
      "BOLT instrumentation runtime error: could not find section "
      ".bolt.instr.tables\n";
  reportError(ErrMsg, sizeof(ErrMsg));
  return Result;
}

#else

/// Non-ELF (e.g. Apple) variant: the metadata blob is handed to us by a
/// getter instead of being located in an ELF note; layout is the same minus
/// the 20-byte note header.
ProfileWriterContext readDescriptions() {
  ProfileWriterContext Result;
  uint8_t *Tables = _bolt_instr_tables_getter();
  uint32_t IndCallDescSize = *reinterpret_cast<uint32_t *>(Tables);
  uint32_t IndCallTargetDescSize =
      *reinterpret_cast<uint32_t *>(Tables + 4 + IndCallDescSize);
  uint32_t FuncDescSize = *reinterpret_cast<uint32_t *>(
      Tables + 8 + IndCallDescSize + IndCallTargetDescSize);
  Result.IndCallDescriptions =
      reinterpret_cast<IndCallDescription *>(Tables + 4);
  Result.IndCallTargets = reinterpret_cast<IndCallTargetDescription *>(
      Tables + 8 + IndCallDescSize);
  Result.FuncDescriptions =
      Tables + 12 + IndCallDescSize + IndCallTargetDescSize;
  Result.Strings = reinterpret_cast<char *>(
      Tables + 12 + IndCallDescSize + IndCallTargetDescSize + FuncDescSize);
  return Result;
}

#endif

#if !defined(__APPLE__)
/// Debug by printing overall metadata global numbers to check it is sane
void printStats(const ProfileWriterContext &Ctx) {
  char StatMsg[BufSize];
  char *StatPtr = StatMsg;
  StatPtr =
      strCopy(StatPtr,
              "\nBOLT INSTRUMENTATION RUNTIME STATISTICS\n\nIndCallDescSize: ");
  // Sizes are derived from pointer distances between consecutive regions of
  // the mmapped metadata blob.
  StatPtr = intToStr(StatPtr,
                     Ctx.FuncDescriptions -
                         reinterpret_cast<uint8_t *>(Ctx.IndCallDescriptions),
                     10);
  StatPtr = strCopy(StatPtr, "\nFuncDescSize: ");
  StatPtr = intToStr(
      StatPtr,
      reinterpret_cast<uint8_t *>(Ctx.Strings) - Ctx.FuncDescriptions, 10);
  StatPtr = strCopy(StatPtr, "\n__bolt_instr_num_ind_calls: ");
  StatPtr = intToStr(StatPtr, __bolt_instr_num_ind_calls, 10);
  StatPtr = strCopy(StatPtr, "\n__bolt_instr_num_funcs: ");
  StatPtr = intToStr(StatPtr, __bolt_instr_num_funcs, 10);
  StatPtr = strCopy(StatPtr, "\n");
  // fd 2 == stderr.
  __write(2, StatMsg, StatPtr - StatMsg);
}
#endif


/// This is part of a simple CFG representation in memory, where we store
/// a dynamically sized array of input and output edges per node, and store
/// a dynamically sized array of nodes per graph. We also store the spanning
/// tree edges for that CFG in a separate array of nodes in
/// \p SpanningTreeNodes, while the regular nodes live in \p CFGNodes.
struct Edge {
  uint32_t Node; // Index in nodes array regarding the destination of this edge
  uint32_t ID;   // Edge index in an array comprising all edges of the graph
};

/// A regular graph node or a spanning tree node
struct Node {
  uint32_t NumInEdges{0};  // Input edge count used to size InEdge
  uint32_t NumOutEdges{0}; // Output edge count used to size OutEdges
  Edge *InEdges{nullptr};  // Created and managed by \p Graph
  Edge *OutEdges{nullptr}; // ditto
};

/// Main class for CFG representation in memory. Manages object creation and
/// destruction, populates an array of CFG nodes as well as corresponding
/// spanning tree nodes.
struct Graph {
  uint32_t NumNodes;
  Node *CFGNodes;          // All basic blocks of the function
  Node *SpanningTreeNodes; // Same node indices, counterless edges only
  uint64_t *EdgeFreqs;     // Per-edge frequency, indexed by edge ID
  uint64_t *CallFreqs;     // Per-call frequency, indexed by call ID
  BumpPtrAllocator &Alloc;
  const FunctionDescription &D;

  /// Reads a list of edges from function description \p D and builds
  /// the graph from it. Allocates several internal dynamic structures that are
  /// later destroyed by ~Graph() and uses \p Alloc. D.LeafNodes contain all
  /// spanning tree leaf nodes descriptions (their counters). They are the seed
  /// used to compute the rest of the missing edge counts in a bottom-up
  /// traversal of the spanning tree.
  Graph(BumpPtrAllocator &Alloc, const FunctionDescription &D,
        const uint64_t *Counters, ProfileWriterContext &Ctx);
  ~Graph();
  void dump() const;

private:
  void computeEdgeFrequencies(const uint64_t *Counters,
                              ProfileWriterContext &Ctx);
  void dumpEdgeFreqs() const;
};

Graph::Graph(BumpPtrAllocator &Alloc, const FunctionDescription &D,
             const uint64_t *Counters, ProfileWriterContext &Ctx)
    : Alloc(Alloc), D(D) {
  DEBUG(reportNumber("G = 0x", (uint64_t)this, 16));
  // First pass to determine number of nodes: the node count is one past the
  // highest node index mentioned by any edge, leaf node or call.
  int32_t MaxNodes = -1;
  CallFreqs = nullptr;
  EdgeFreqs = nullptr;
  for (int I = 0; I < D.NumEdges; ++I) {
    if (static_cast<int32_t>(D.Edges[I].FromNode) > MaxNodes)
      MaxNodes = D.Edges[I].FromNode;
    if (static_cast<int32_t>(D.Edges[I].ToNode) > MaxNodes)
      MaxNodes = D.Edges[I].ToNode;
  }

  for (int I = 0; I < D.NumLeafNodes; ++I)
    if (static_cast<int32_t>(D.LeafNodes[I].Node) > MaxNodes)
      MaxNodes = D.LeafNodes[I].Node;

  for (int I = 0; I < D.NumCalls; ++I)
    if (static_cast<int32_t>(D.Calls[I].FromNode) > MaxNodes)
      MaxNodes = D.Calls[I].FromNode;

  // No nodes? Nothing to do
  if (MaxNodes < 0) {
    DEBUG(report("No nodes!\n"));
    CFGNodes = nullptr;
    SpanningTreeNodes = nullptr;
    NumNodes = 0;
    return;
  }
  ++MaxNodes;
  DEBUG(reportNumber("NumNodes = ", MaxNodes, 10));
  NumNodes = static_cast<uint32_t>(MaxNodes);

  // Initial allocations
  CFGNodes = new (Alloc) Node[MaxNodes];

  DEBUG(reportNumber("G->CFGNodes = 0x", (uint64_t)CFGNodes, 16));
  SpanningTreeNodes = new (Alloc) Node[MaxNodes];
  DEBUG(reportNumber("G->SpanningTreeNodes = 0x",
                     (uint64_t)SpanningTreeNodes, 16));

  // Figure out how much to allocate to each vector (in/out edge sets).
  // Edges with Counter == 0xffffffff have no counter of their own and belong
  // to the spanning tree, whose edge counts are inferred later.
  for (int I = 0; I < D.NumEdges; ++I) {
    CFGNodes[D.Edges[I].FromNode].NumOutEdges++;
    CFGNodes[D.Edges[I].ToNode].NumInEdges++;
    if (D.Edges[I].Counter != 0xffffffff)
      continue;

    SpanningTreeNodes[D.Edges[I].FromNode].NumOutEdges++;
    SpanningTreeNodes[D.Edges[I].ToNode].NumInEdges++;
  }

  // Allocate in/out edge sets. The counts are reset to zero afterwards so
  // the fill pass below can reuse them as insertion cursors.
  for (int I = 0; I < MaxNodes; ++I) {
    if (CFGNodes[I].NumInEdges > 0)
      CFGNodes[I].InEdges = new (Alloc) Edge[CFGNodes[I].NumInEdges];
    if (CFGNodes[I].NumOutEdges > 0)
      CFGNodes[I].OutEdges = new (Alloc) Edge[CFGNodes[I].NumOutEdges];
    if (SpanningTreeNodes[I].NumInEdges > 0)
      SpanningTreeNodes[I].InEdges =
          new (Alloc) Edge[SpanningTreeNodes[I].NumInEdges];
    if (SpanningTreeNodes[I].NumOutEdges > 0)
      SpanningTreeNodes[I].OutEdges =
          new (Alloc) Edge[SpanningTreeNodes[I].NumOutEdges];
    CFGNodes[I].NumInEdges = 0;
    CFGNodes[I].NumOutEdges = 0;
    SpanningTreeNodes[I].NumInEdges = 0;
    SpanningTreeNodes[I].NumOutEdges = 0;
  }

  // Fill in/out edge sets
  for (int I = 0; I < D.NumEdges; ++I) {
    const uint32_t Src = D.Edges[I].FromNode;
    const uint32_t Dst = D.Edges[I].ToNode;
    Edge *E = &CFGNodes[Src].OutEdges[CFGNodes[Src].NumOutEdges++];
    E->Node = Dst;
    E->ID = I;

    E = &CFGNodes[Dst].InEdges[CFGNodes[Dst].NumInEdges++];
    E->Node = Src;
    E->ID = I;

    if (D.Edges[I].Counter != 0xffffffff)
      continue;

    E = &SpanningTreeNodes[Src]
             .OutEdges[SpanningTreeNodes[Src].NumOutEdges++];
    E->Node = Dst;
    E->ID = I;

    E = &SpanningTreeNodes[Dst]
             .InEdges[SpanningTreeNodes[Dst].NumInEdges++];
    E->Node = Src;
    E->ID = I;
  }

  computeEdgeFrequencies(Counters, Ctx);
}

Graph::~Graph() {
  // Deallocations happen in strict reverse order of allocation: the bump-ptr
  // allocator is stack based (see file header), so LIFO order is required.
  if (CallFreqs)
    Alloc.deallocate(CallFreqs);
  if (EdgeFreqs)
    Alloc.deallocate(EdgeFreqs);
  for (int I = NumNodes - 1; I >= 0; --I) {
    if (SpanningTreeNodes[I].OutEdges)
      Alloc.deallocate(SpanningTreeNodes[I].OutEdges);
    if (SpanningTreeNodes[I].InEdges)
      Alloc.deallocate(SpanningTreeNodes[I].InEdges);
    if (CFGNodes[I].OutEdges)
      Alloc.deallocate(CFGNodes[I].OutEdges);
    if (CFGNodes[I].InEdges)
      Alloc.deallocate(CFGNodes[I].InEdges);
  }
  if (SpanningTreeNodes)
    Alloc.deallocate(SpanningTreeNodes);
  if (CFGNodes)
    Alloc.deallocate(CFGNodes);
}

/// Print the full graph and its spanning tree (debug helper).
void Graph::dump() const {
  reportNumber("Dumping graph with number of nodes: ", NumNodes, 10);
  report(" Full graph:\n");
  for (int I = 0; I < NumNodes; ++I) {
    const Node *N = &CFGNodes[I];
    reportNumber("  Node #", I, 10);
    reportNumber("    InEdges total ", N->NumInEdges, 10);
    for (int J = 0; J < N->NumInEdges; ++J)
      reportNumber("      ", N->InEdges[J].Node, 10);
    reportNumber("    OutEdges total ", N->NumOutEdges, 10);
    for (int J = 0; J < N->NumOutEdges; ++J)
      reportNumber("      ", N->OutEdges[J].Node, 10);
    report("\n");
  }
  report(" Spanning tree:\n");
  for (int I = 0; I < NumNodes; ++I) {
    const Node *N = &SpanningTreeNodes[I];
    reportNumber("  Node #", I, 10);
    reportNumber("    InEdges total ", N->NumInEdges, 10);
    for (int J = 0; J < N->NumInEdges; ++J)
      reportNumber("      ", N->InEdges[J].Node, 10);
    reportNumber("    OutEdges total ", N->NumOutEdges, 10);
    for (int J = 0; J < N->NumOutEdges; ++J)
      reportNumber("      ", N->OutEdges[J].Node, 10);
    report("\n");
  }
}

/// Print every edge's computed frequency (debug helper).
void Graph::dumpEdgeFreqs() const {
  reportNumber(
      "Dumping edge frequencies for graph with num edges: ", D.NumEdges, 10);
  for (int I = 0; I < D.NumEdges; ++I) {
    reportNumber("* Src: ", D.Edges[I].FromNode, 10);
    reportNumber("  Dst: ", D.Edges[I].ToNode, 10);
    reportNumber("    Cnt: ", EdgeFreqs[I], 10);
  }
}

/// Auxiliary map structure for fast lookups of which calls map to each node of
/// the function CFG
struct NodeToCallsMap {
  struct MapEntry {
    uint32_t NumCalls;
    uint32_t *Calls; // Call IDs (indices into D.Calls) made from this node
  };
  MapEntry *Entries; // One entry per CFG node
  BumpPtrAllocator &Alloc;
  const uint32_t NumNodes;

  /// Build the node -> calls index in two passes: count calls per node,
  /// allocate exact-size arrays, then fill (reusing NumCalls as a cursor).
  NodeToCallsMap(BumpPtrAllocator &Alloc, const FunctionDescription &D,
                 uint32_t NumNodes)
      : Alloc(Alloc), NumNodes(NumNodes) {
    Entries = new (Alloc, 0) MapEntry[NumNodes];
    for (int I = 0; I < D.NumCalls; ++I) {
      DEBUG(reportNumber("Registering call in node ", D.Calls[I].FromNode, 10));
      ++Entries[D.Calls[I].FromNode].NumCalls;
    }
    for (int I = 0; I < NumNodes; ++I) {
      Entries[I].Calls = Entries[I].NumCalls ? new (Alloc)
                                                   uint32_t[Entries[I].NumCalls]
                                             : nullptr;
      Entries[I].NumCalls = 0;
    }
    for (int I = 0; I < D.NumCalls; ++I) {
      MapEntry &Entry = Entries[D.Calls[I].FromNode];
      Entry.Calls[Entry.NumCalls++] = I;
    }
  }

  /// Set the frequency of all calls in node \p NodeID to Freq. However, if
  /// the calls have their own counters and do not depend on the basic block
  /// counter, this means they have landing pads and throw exceptions. In this
  /// case, set their frequency with their counters and return the maximum
  /// value observed in such counters. This will be used as the new frequency
  /// at basic block entry. This is used to fix the CFG edge frequencies in the
  /// presence of exceptions.
  uint64_t visitAllCallsIn(uint32_t NodeID, uint64_t Freq, uint64_t *CallFreqs,
                           const FunctionDescription &D,
                           const uint64_t *Counters,
                           ProfileWriterContext &Ctx) const {
    const MapEntry &Entry = Entries[NodeID];
    uint64_t MaxValue = 0ull;
    for (int I = 0, E = Entry.NumCalls; I != E; ++I) {
      const uint32_t CallID = Entry.Calls[I];
      DEBUG(reportNumber("  Setting freq for call ID: ", CallID, 10));
      const CallDescription &CallDesc = D.Calls[CallID];
      if (CallDesc.Counter == 0xffffffff) {
        // No private counter: the call executes as often as its BB.
        CallFreqs[CallID] = Freq;
        DEBUG(reportNumber("  with : ", Freq, 10));
      } else {
        const uint64_t CounterVal = Counters[CallDesc.Counter];
        CallFreqs[CallID] = CounterVal;
        MaxValue = CounterVal > MaxValue ? CounterVal : MaxValue;
        DEBUG(reportNumber("  with (private counter) : ", CounterVal, 10));
      }
      DEBUG(reportNumber("  Address: 0x", CallDesc.TargetAddress, 16));
      // Record the executed call in the global call-flow table so callees
      // without counters can still be reported.
      if (CallFreqs[CallID] > 0)
        Ctx.CallFlowTable->get(CallDesc.TargetAddress).Calls +=
            CallFreqs[CallID];
    }
    return MaxValue;
  }

  ~NodeToCallsMap() {
    // Reverse order: the bump-ptr allocator requires LIFO deallocation.
    for (int I = NumNodes - 1; I >= 0; --I)
      if (Entries[I].Calls)
        Alloc.deallocate(Entries[I].Calls);
    Alloc.deallocate(Entries);
  }
};

/// Fill an array with the frequency of each edge in the function represented
/// by G, as well as another array for each call.
void Graph::computeEdgeFrequencies(const uint64_t *Counters,
                                   ProfileWriterContext &Ctx) {
  if (NumNodes == 0)
    return;

  EdgeFreqs = D.NumEdges ? new (Alloc, 0) uint64_t [D.NumEdges] : nullptr;
  CallFreqs = D.NumCalls ? new (Alloc, 0) uint64_t [D.NumCalls] : nullptr;

  // Setup a lookup for calls present in each node (BB)
  NodeToCallsMap *CallMap = new (Alloc) NodeToCallsMap(Alloc, D, NumNodes);

  // Perform a bottom-up, BFS traversal of the spanning tree in G. Edges in the
  // spanning tree don't have explicit counters.
We must infer their value using 1130cc4b2fb6SRafael Auler // a linear combination of other counters (sum of counters of the outgoing 1131cc4b2fb6SRafael Auler // edges minus sum of counters of the incoming edges). 113216a497c6SRafael Auler uint32_t *Stack = new (Alloc) uint32_t [NumNodes]; 1133cc4b2fb6SRafael Auler uint32_t StackTop = 0; 1134cc4b2fb6SRafael Auler enum Status : uint8_t { S_NEW = 0, S_VISITING, S_VISITED }; 113516a497c6SRafael Auler Status *Visited = new (Alloc, 0) Status[NumNodes]; 113616a497c6SRafael Auler uint64_t *LeafFrequency = new (Alloc, 0) uint64_t[NumNodes]; 113716a497c6SRafael Auler uint64_t *EntryAddress = new (Alloc, 0) uint64_t[NumNodes]; 1138cc4b2fb6SRafael Auler 1139cc4b2fb6SRafael Auler // Setup a fast lookup for frequency of leaf nodes, which have special 1140cc4b2fb6SRafael Auler // basic block frequency instrumentation (they are not edge profiled). 114116a497c6SRafael Auler for (int I = 0; I < D.NumLeafNodes; ++I) { 114216a497c6SRafael Auler LeafFrequency[D.LeafNodes[I].Node] = Counters[D.LeafNodes[I].Counter]; 1143cc4b2fb6SRafael Auler DEBUG({ 114416a497c6SRafael Auler if (Counters[D.LeafNodes[I].Counter] > 0) { 114516a497c6SRafael Auler reportNumber("Leaf Node# ", D.LeafNodes[I].Node, 10); 114616a497c6SRafael Auler reportNumber(" Counter: ", Counters[D.LeafNodes[I].Counter], 10); 1147cc4b2fb6SRafael Auler } 1148cc4b2fb6SRafael Auler }); 114916a497c6SRafael Auler } 115016a497c6SRafael Auler for (int I = 0; I < D.NumEntryNodes; ++I) { 115116a497c6SRafael Auler EntryAddress[D.EntryNodes[I].Node] = D.EntryNodes[I].Address; 115216a497c6SRafael Auler DEBUG({ 115316a497c6SRafael Auler reportNumber("Entry Node# ", D.EntryNodes[I].Node, 10); 115416a497c6SRafael Auler reportNumber(" Address: ", D.EntryNodes[I].Address, 16); 115516a497c6SRafael Auler }); 1156cc4b2fb6SRafael Auler } 1157cc4b2fb6SRafael Auler // Add all root nodes to the stack 1158883bf0e8SAmir Ayupov for (int I = 0; I < NumNodes; ++I) 115916a497c6SRafael Auler if 
(SpanningTreeNodes[I].NumInEdges == 0) 1160cc4b2fb6SRafael Auler Stack[StackTop++] = I; 1161883bf0e8SAmir Ayupov 1162cc4b2fb6SRafael Auler // Empty stack? 1163cc4b2fb6SRafael Auler if (StackTop == 0) { 116416a497c6SRafael Auler DEBUG(report("Empty stack!\n")); 116516a497c6SRafael Auler Alloc.deallocate(EntryAddress); 1166cc4b2fb6SRafael Auler Alloc.deallocate(LeafFrequency); 1167cc4b2fb6SRafael Auler Alloc.deallocate(Visited); 1168cc4b2fb6SRafael Auler Alloc.deallocate(Stack); 116916a497c6SRafael Auler CallMap->~NodeToCallsMap(); 117016a497c6SRafael Auler Alloc.deallocate(CallMap); 117116a497c6SRafael Auler if (CallFreqs) 117216a497c6SRafael Auler Alloc.deallocate(CallFreqs); 117316a497c6SRafael Auler if (EdgeFreqs) 117416a497c6SRafael Auler Alloc.deallocate(EdgeFreqs); 117516a497c6SRafael Auler EdgeFreqs = nullptr; 117616a497c6SRafael Auler CallFreqs = nullptr; 117716a497c6SRafael Auler return; 1178cc4b2fb6SRafael Auler } 1179cc4b2fb6SRafael Auler // Add all known edge counts, will infer the rest 118016a497c6SRafael Auler for (int I = 0; I < D.NumEdges; ++I) { 118116a497c6SRafael Auler const uint32_t C = D.Edges[I].Counter; 1182cc4b2fb6SRafael Auler if (C == 0xffffffff) // inferred counter - we will compute its value 1183cc4b2fb6SRafael Auler continue; 118416a497c6SRafael Auler EdgeFreqs[I] = Counters[C]; 1185cc4b2fb6SRafael Auler } 1186cc4b2fb6SRafael Auler 1187cc4b2fb6SRafael Auler while (StackTop > 0) { 1188cc4b2fb6SRafael Auler const uint32_t Cur = Stack[--StackTop]; 1189cc4b2fb6SRafael Auler DEBUG({ 1190cc4b2fb6SRafael Auler if (Visited[Cur] == S_VISITING) 1191cc4b2fb6SRafael Auler report("(visiting) "); 1192cc4b2fb6SRafael Auler else 1193cc4b2fb6SRafael Auler report("(new) "); 1194cc4b2fb6SRafael Auler reportNumber("Cur: ", Cur, 10); 1195cc4b2fb6SRafael Auler }); 1196cc4b2fb6SRafael Auler 1197cc4b2fb6SRafael Auler // This shouldn't happen in a tree 1198cc4b2fb6SRafael Auler assert(Visited[Cur] != S_VISITED, "should not have visited nodes in stack"); 
1199cc4b2fb6SRafael Auler if (Visited[Cur] == S_NEW) { 1200cc4b2fb6SRafael Auler Visited[Cur] = S_VISITING; 1201cc4b2fb6SRafael Auler Stack[StackTop++] = Cur; 120216a497c6SRafael Auler assert(StackTop <= NumNodes, "stack grew too large"); 120316a497c6SRafael Auler for (int I = 0, E = SpanningTreeNodes[Cur].NumOutEdges; I < E; ++I) { 120416a497c6SRafael Auler const uint32_t Succ = SpanningTreeNodes[Cur].OutEdges[I].Node; 1205cc4b2fb6SRafael Auler Stack[StackTop++] = Succ; 120616a497c6SRafael Auler assert(StackTop <= NumNodes, "stack grew too large"); 1207cc4b2fb6SRafael Auler } 1208cc4b2fb6SRafael Auler continue; 1209cc4b2fb6SRafael Auler } 1210cc4b2fb6SRafael Auler Visited[Cur] = S_VISITED; 1211cc4b2fb6SRafael Auler 1212cc4b2fb6SRafael Auler // Establish our node frequency based on outgoing edges, which should all be 1213cc4b2fb6SRafael Auler // resolved by now. 1214cc4b2fb6SRafael Auler int64_t CurNodeFreq = LeafFrequency[Cur]; 1215cc4b2fb6SRafael Auler // Not a leaf? 1216cc4b2fb6SRafael Auler if (!CurNodeFreq) { 121716a497c6SRafael Auler for (int I = 0, E = CFGNodes[Cur].NumOutEdges; I != E; ++I) { 121816a497c6SRafael Auler const uint32_t SuccEdge = CFGNodes[Cur].OutEdges[I].ID; 121916a497c6SRafael Auler CurNodeFreq += EdgeFreqs[SuccEdge]; 1220cc4b2fb6SRafael Auler } 1221cc4b2fb6SRafael Auler } 122216a497c6SRafael Auler if (CurNodeFreq < 0) 122316a497c6SRafael Auler CurNodeFreq = 0; 122416a497c6SRafael Auler 122516a497c6SRafael Auler const uint64_t CallFreq = CallMap->visitAllCallsIn( 122616a497c6SRafael Auler Cur, CurNodeFreq > 0 ? CurNodeFreq : 0, CallFreqs, D, Counters, Ctx); 122716a497c6SRafael Auler 122816a497c6SRafael Auler // Exception handling affected our output flow? Fix with calls info 122916a497c6SRafael Auler DEBUG({ 123016a497c6SRafael Auler if (CallFreq > CurNodeFreq) 123116a497c6SRafael Auler report("Bumping node frequency with call info\n"); 123216a497c6SRafael Auler }); 123316a497c6SRafael Auler CurNodeFreq = CallFreq > CurNodeFreq ? 
CallFreq : CurNodeFreq; 123416a497c6SRafael Auler 123516a497c6SRafael Auler if (CurNodeFreq > 0) { 123616a497c6SRafael Auler if (uint64_t Addr = EntryAddress[Cur]) { 123716a497c6SRafael Auler DEBUG( 123816a497c6SRafael Auler reportNumber(" Setting flow at entry point address 0x", Addr, 16)); 123916a497c6SRafael Auler DEBUG(reportNumber(" with: ", CurNodeFreq, 10)); 124016a497c6SRafael Auler Ctx.CallFlowTable->get(Addr).Val = CurNodeFreq; 124116a497c6SRafael Auler } 124216a497c6SRafael Auler } 124316a497c6SRafael Auler 124416a497c6SRafael Auler // No parent? Reached a tree root, limit to call frequency updating. 1245883bf0e8SAmir Ayupov if (SpanningTreeNodes[Cur].NumInEdges == 0) 124616a497c6SRafael Auler continue; 124716a497c6SRafael Auler 124816a497c6SRafael Auler assert(SpanningTreeNodes[Cur].NumInEdges == 1, "must have 1 parent"); 124916a497c6SRafael Auler const uint32_t Parent = SpanningTreeNodes[Cur].InEdges[0].Node; 125016a497c6SRafael Auler const uint32_t ParentEdge = SpanningTreeNodes[Cur].InEdges[0].ID; 125116a497c6SRafael Auler 1252cc4b2fb6SRafael Auler // Calculate parent edge freq. 125316a497c6SRafael Auler int64_t ParentEdgeFreq = CurNodeFreq; 125416a497c6SRafael Auler for (int I = 0, E = CFGNodes[Cur].NumInEdges; I != E; ++I) { 125516a497c6SRafael Auler const uint32_t PredEdge = CFGNodes[Cur].InEdges[I].ID; 125616a497c6SRafael Auler ParentEdgeFreq -= EdgeFreqs[PredEdge]; 1257cc4b2fb6SRafael Auler } 125816a497c6SRafael Auler 1259cc4b2fb6SRafael Auler // Sometimes the conservative CFG that BOLT builds will lead to incorrect 1260cc4b2fb6SRafael Auler // flow computation. For example, in a BB that transitively calls the exit 1261cc4b2fb6SRafael Auler // syscall, BOLT will add a fall-through successor even though it should not 1262cc4b2fb6SRafael Auler // have any successors. So this block execution will likely be wrong. We 1263cc4b2fb6SRafael Auler // tolerate this imperfection since this case should be quite infrequent. 
1264cc4b2fb6SRafael Auler if (ParentEdgeFreq < 0) { 126516a497c6SRafael Auler DEBUG(dumpEdgeFreqs()); 1266cc4b2fb6SRafael Auler DEBUG(report("WARNING: incorrect flow")); 1267cc4b2fb6SRafael Auler ParentEdgeFreq = 0; 1268cc4b2fb6SRafael Auler } 1269cc4b2fb6SRafael Auler DEBUG(reportNumber(" Setting freq for ParentEdge: ", ParentEdge, 10)); 1270cc4b2fb6SRafael Auler DEBUG(reportNumber(" with ParentEdgeFreq: ", ParentEdgeFreq, 10)); 127116a497c6SRafael Auler EdgeFreqs[ParentEdge] = ParentEdgeFreq; 1272cc4b2fb6SRafael Auler } 1273cc4b2fb6SRafael Auler 127416a497c6SRafael Auler Alloc.deallocate(EntryAddress); 1275cc4b2fb6SRafael Auler Alloc.deallocate(LeafFrequency); 1276cc4b2fb6SRafael Auler Alloc.deallocate(Visited); 1277cc4b2fb6SRafael Auler Alloc.deallocate(Stack); 127816a497c6SRafael Auler CallMap->~NodeToCallsMap(); 127916a497c6SRafael Auler Alloc.deallocate(CallMap); 128016a497c6SRafael Auler DEBUG(dumpEdgeFreqs()); 1281cc4b2fb6SRafael Auler } 1282cc4b2fb6SRafael Auler 128316a497c6SRafael Auler /// Write to \p FD all of the edge profiles for function \p FuncDesc. Uses 128416a497c6SRafael Auler /// \p Alloc to allocate helper dynamic structures used to compute profile for 128516a497c6SRafael Auler /// edges that we do not explictly instrument. 
128616a497c6SRafael Auler const uint8_t *writeFunctionProfile(int FD, ProfileWriterContext &Ctx, 128716a497c6SRafael Auler const uint8_t *FuncDesc, 128816a497c6SRafael Auler BumpPtrAllocator &Alloc) { 128916a497c6SRafael Auler const FunctionDescription F(FuncDesc); 129016a497c6SRafael Auler const uint8_t *next = FuncDesc + F.getSize(); 1291cc4b2fb6SRafael Auler 1292a0dd5b05SAlexander Shaposhnikov #if !defined(__APPLE__) 1293a0dd5b05SAlexander Shaposhnikov uint64_t *bolt_instr_locations = __bolt_instr_locations; 1294a0dd5b05SAlexander Shaposhnikov #else 1295a0dd5b05SAlexander Shaposhnikov uint64_t *bolt_instr_locations = _bolt_instr_locations_getter(); 1296a0dd5b05SAlexander Shaposhnikov #endif 1297a0dd5b05SAlexander Shaposhnikov 1298cc4b2fb6SRafael Auler // Skip funcs we know are cold 1299cc4b2fb6SRafael Auler #ifndef ENABLE_DEBUG 130016a497c6SRafael Auler uint64_t CountersFreq = 0; 1301883bf0e8SAmir Ayupov for (int I = 0; I < F.NumLeafNodes; ++I) 1302a0dd5b05SAlexander Shaposhnikov CountersFreq += bolt_instr_locations[F.LeafNodes[I].Counter]; 1303883bf0e8SAmir Ayupov 130416a497c6SRafael Auler if (CountersFreq == 0) { 130516a497c6SRafael Auler for (int I = 0; I < F.NumEdges; ++I) { 130616a497c6SRafael Auler const uint32_t C = F.Edges[I].Counter; 130716a497c6SRafael Auler if (C == 0xffffffff) 130816a497c6SRafael Auler continue; 1309a0dd5b05SAlexander Shaposhnikov CountersFreq += bolt_instr_locations[C]; 131016a497c6SRafael Auler } 131116a497c6SRafael Auler if (CountersFreq == 0) { 131216a497c6SRafael Auler for (int I = 0; I < F.NumCalls; ++I) { 131316a497c6SRafael Auler const uint32_t C = F.Calls[I].Counter; 131416a497c6SRafael Auler if (C == 0xffffffff) 131516a497c6SRafael Auler continue; 1316a0dd5b05SAlexander Shaposhnikov CountersFreq += bolt_instr_locations[C]; 131716a497c6SRafael Auler } 131816a497c6SRafael Auler if (CountersFreq == 0) 1319cc4b2fb6SRafael Auler return next; 132016a497c6SRafael Auler } 132116a497c6SRafael Auler } 1322cc4b2fb6SRafael Auler #endif 
1323cc4b2fb6SRafael Auler 1324a0dd5b05SAlexander Shaposhnikov Graph *G = new (Alloc) Graph(Alloc, F, bolt_instr_locations, Ctx); 1325cc4b2fb6SRafael Auler DEBUG(G->dump()); 1326a0dd5b05SAlexander Shaposhnikov 132716a497c6SRafael Auler if (!G->EdgeFreqs && !G->CallFreqs) { 1328cc4b2fb6SRafael Auler G->~Graph(); 1329cc4b2fb6SRafael Auler Alloc.deallocate(G); 1330cc4b2fb6SRafael Auler return next; 1331cc4b2fb6SRafael Auler } 1332cc4b2fb6SRafael Auler 133316a497c6SRafael Auler for (int I = 0; I < F.NumEdges; ++I) { 133416a497c6SRafael Auler const uint64_t Freq = G->EdgeFreqs[I]; 1335cc4b2fb6SRafael Auler if (Freq == 0) 1336cc4b2fb6SRafael Auler continue; 133716a497c6SRafael Auler const EdgeDescription *Desc = &F.Edges[I]; 1338cc4b2fb6SRafael Auler char LineBuf[BufSize]; 1339cc4b2fb6SRafael Auler char *Ptr = LineBuf; 134016a497c6SRafael Auler Ptr = serializeLoc(Ctx, Ptr, Desc->From, BufSize); 134116a497c6SRafael Auler Ptr = serializeLoc(Ctx, Ptr, Desc->To, BufSize - (Ptr - LineBuf)); 1342cc4b2fb6SRafael Auler Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 22); 1343cc4b2fb6SRafael Auler Ptr = intToStr(Ptr, Freq, 10); 1344cc4b2fb6SRafael Auler *Ptr++ = '\n'; 1345cc4b2fb6SRafael Auler __write(FD, LineBuf, Ptr - LineBuf); 1346cc4b2fb6SRafael Auler } 1347cc4b2fb6SRafael Auler 134816a497c6SRafael Auler for (int I = 0; I < F.NumCalls; ++I) { 134916a497c6SRafael Auler const uint64_t Freq = G->CallFreqs[I]; 135016a497c6SRafael Auler if (Freq == 0) 135116a497c6SRafael Auler continue; 135216a497c6SRafael Auler char LineBuf[BufSize]; 135316a497c6SRafael Auler char *Ptr = LineBuf; 135416a497c6SRafael Auler const CallDescription *Desc = &F.Calls[I]; 135516a497c6SRafael Auler Ptr = serializeLoc(Ctx, Ptr, Desc->From, BufSize); 135616a497c6SRafael Auler Ptr = serializeLoc(Ctx, Ptr, Desc->To, BufSize - (Ptr - LineBuf)); 135716a497c6SRafael Auler Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25); 135816a497c6SRafael Auler Ptr = intToStr(Ptr, Freq, 10); 135916a497c6SRafael 
Auler *Ptr++ = '\n'; 136016a497c6SRafael Auler __write(FD, LineBuf, Ptr - LineBuf); 136116a497c6SRafael Auler } 136216a497c6SRafael Auler 1363cc4b2fb6SRafael Auler G->~Graph(); 1364cc4b2fb6SRafael Auler Alloc.deallocate(G); 1365cc4b2fb6SRafael Auler return next; 1366cc4b2fb6SRafael Auler } 1367cc4b2fb6SRafael Auler 1368a0dd5b05SAlexander Shaposhnikov #if !defined(__APPLE__) 136916a497c6SRafael Auler const IndCallTargetDescription * 137016a497c6SRafael Auler ProfileWriterContext::lookupIndCallTarget(uint64_t Target) const { 137116a497c6SRafael Auler uint32_t B = 0; 137216a497c6SRafael Auler uint32_t E = __bolt_instr_num_ind_targets; 137316a497c6SRafael Auler if (E == 0) 137416a497c6SRafael Auler return nullptr; 137516a497c6SRafael Auler do { 137616a497c6SRafael Auler uint32_t I = (E - B) / 2 + B; 137716a497c6SRafael Auler if (IndCallTargets[I].Address == Target) 137816a497c6SRafael Auler return &IndCallTargets[I]; 137916a497c6SRafael Auler if (IndCallTargets[I].Address < Target) 138016a497c6SRafael Auler B = I + 1; 138116a497c6SRafael Auler else 138216a497c6SRafael Auler E = I; 138316a497c6SRafael Auler } while (B < E); 138416a497c6SRafael Auler return nullptr; 1385cc4b2fb6SRafael Auler } 138662aa74f8SRafael Auler 138716a497c6SRafael Auler /// Write a single indirect call <src, target> pair to the fdata file 138816a497c6SRafael Auler void visitIndCallCounter(IndirectCallHashTable::MapEntry &Entry, 138916a497c6SRafael Auler int FD, int CallsiteID, 139016a497c6SRafael Auler ProfileWriterContext *Ctx) { 139116a497c6SRafael Auler if (Entry.Val == 0) 139216a497c6SRafael Auler return; 139316a497c6SRafael Auler DEBUG(reportNumber("Target func 0x", Entry.Key, 16)); 139416a497c6SRafael Auler DEBUG(reportNumber("Target freq: ", Entry.Val, 10)); 139516a497c6SRafael Auler const IndCallDescription *CallsiteDesc = 139616a497c6SRafael Auler &Ctx->IndCallDescriptions[CallsiteID]; 139716a497c6SRafael Auler const IndCallTargetDescription *TargetDesc = 1398*a86dd9aeSDenis Revunov 
Ctx->lookupIndCallTarget(Entry.Key - TextBaseAddress); 139916a497c6SRafael Auler if (!TargetDesc) { 140016a497c6SRafael Auler DEBUG(report("Failed to lookup indirect call target\n")); 1401cc4b2fb6SRafael Auler char LineBuf[BufSize]; 140262aa74f8SRafael Auler char *Ptr = LineBuf; 140316a497c6SRafael Auler Ptr = serializeLoc(*Ctx, Ptr, *CallsiteDesc, BufSize); 140416a497c6SRafael Auler Ptr = strCopy(Ptr, "0 [unknown] 0 0 ", BufSize - (Ptr - LineBuf) - 40); 140516a497c6SRafael Auler Ptr = intToStr(Ptr, Entry.Val, 10); 140616a497c6SRafael Auler *Ptr++ = '\n'; 140716a497c6SRafael Auler __write(FD, LineBuf, Ptr - LineBuf); 140816a497c6SRafael Auler return; 140916a497c6SRafael Auler } 141016a497c6SRafael Auler Ctx->CallFlowTable->get(TargetDesc->Address).Calls += Entry.Val; 141116a497c6SRafael Auler char LineBuf[BufSize]; 141216a497c6SRafael Auler char *Ptr = LineBuf; 141316a497c6SRafael Auler Ptr = serializeLoc(*Ctx, Ptr, *CallsiteDesc, BufSize); 141416a497c6SRafael Auler Ptr = serializeLoc(*Ctx, Ptr, TargetDesc->Loc, BufSize - (Ptr - LineBuf)); 1415cc4b2fb6SRafael Auler Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25); 141616a497c6SRafael Auler Ptr = intToStr(Ptr, Entry.Val, 10); 141762aa74f8SRafael Auler *Ptr++ = '\n'; 1418821480d2SRafael Auler __write(FD, LineBuf, Ptr - LineBuf); 141962aa74f8SRafael Auler } 1420cc4b2fb6SRafael Auler 142116a497c6SRafael Auler /// Write to \p FD all of the indirect call profiles. 142216a497c6SRafael Auler void writeIndirectCallProfile(int FD, ProfileWriterContext &Ctx) { 142316a497c6SRafael Auler for (int I = 0; I < __bolt_instr_num_ind_calls; ++I) { 142416a497c6SRafael Auler DEBUG(reportNumber("IndCallsite #", I, 10)); 142516a497c6SRafael Auler GlobalIndCallCounters[I].forEachElement(visitIndCallCounter, FD, I, &Ctx); 142616a497c6SRafael Auler } 142716a497c6SRafael Auler } 142816a497c6SRafael Auler 142916a497c6SRafael Auler /// Check a single call flow for a callee versus all known callers. 
If there are 143016a497c6SRafael Auler /// less callers than what the callee expects, write the difference with source 143116a497c6SRafael Auler /// [unknown] in the profile. 143216a497c6SRafael Auler void visitCallFlowEntry(CallFlowHashTable::MapEntry &Entry, int FD, 143316a497c6SRafael Auler ProfileWriterContext *Ctx) { 143416a497c6SRafael Auler DEBUG(reportNumber("Call flow entry address: 0x", Entry.Key, 16)); 143516a497c6SRafael Auler DEBUG(reportNumber("Calls: ", Entry.Calls, 10)); 143616a497c6SRafael Auler DEBUG(reportNumber("Reported entry frequency: ", Entry.Val, 10)); 143716a497c6SRafael Auler DEBUG({ 143816a497c6SRafael Auler if (Entry.Calls > Entry.Val) 143916a497c6SRafael Auler report(" More calls than expected!\n"); 144016a497c6SRafael Auler }); 144116a497c6SRafael Auler if (Entry.Val <= Entry.Calls) 144216a497c6SRafael Auler return; 144316a497c6SRafael Auler DEBUG(reportNumber( 144416a497c6SRafael Auler " Balancing calls with traffic: ", Entry.Val - Entry.Calls, 10)); 144516a497c6SRafael Auler const IndCallTargetDescription *TargetDesc = 144616a497c6SRafael Auler Ctx->lookupIndCallTarget(Entry.Key); 144716a497c6SRafael Auler if (!TargetDesc) { 144816a497c6SRafael Auler // There is probably something wrong with this callee and this should be 144916a497c6SRafael Auler // investigated, but I don't want to assert and lose all data collected. 
145016a497c6SRafael Auler DEBUG(report("WARNING: failed to look up call target!\n")); 145116a497c6SRafael Auler return; 145216a497c6SRafael Auler } 145316a497c6SRafael Auler char LineBuf[BufSize]; 145416a497c6SRafael Auler char *Ptr = LineBuf; 145516a497c6SRafael Auler Ptr = strCopy(Ptr, "0 [unknown] 0 ", BufSize); 145616a497c6SRafael Auler Ptr = serializeLoc(*Ctx, Ptr, TargetDesc->Loc, BufSize - (Ptr - LineBuf)); 145716a497c6SRafael Auler Ptr = strCopy(Ptr, "0 ", BufSize - (Ptr - LineBuf) - 25); 145816a497c6SRafael Auler Ptr = intToStr(Ptr, Entry.Val - Entry.Calls, 10); 145916a497c6SRafael Auler *Ptr++ = '\n'; 146016a497c6SRafael Auler __write(FD, LineBuf, Ptr - LineBuf); 146116a497c6SRafael Auler } 146216a497c6SRafael Auler 146316a497c6SRafael Auler /// Open fdata file for writing and return a valid file descriptor, aborting 146416a497c6SRafael Auler /// program upon failure. 146516a497c6SRafael Auler int openProfile() { 146616a497c6SRafael Auler // Build the profile name string by appending our PID 146716a497c6SRafael Auler char Buf[BufSize]; 146816a497c6SRafael Auler char *Ptr = Buf; 146916a497c6SRafael Auler uint64_t PID = __getpid(); 147016a497c6SRafael Auler Ptr = strCopy(Buf, __bolt_instr_filename, BufSize); 147116a497c6SRafael Auler if (__bolt_instr_use_pid) { 147216a497c6SRafael Auler Ptr = strCopy(Ptr, ".", BufSize - (Ptr - Buf + 1)); 147316a497c6SRafael Auler Ptr = intToStr(Ptr, PID, 10); 147416a497c6SRafael Auler Ptr = strCopy(Ptr, ".fdata", BufSize - (Ptr - Buf + 1)); 147516a497c6SRafael Auler } 147616a497c6SRafael Auler *Ptr++ = '\0'; 147760bbddf3SDenis Revunov uint64_t FD = __open(Buf, O_WRONLY | O_TRUNC | O_CREAT, 147816a497c6SRafael Auler /*mode=*/0666); 147916a497c6SRafael Auler if (static_cast<int64_t>(FD) < 0) { 148016a497c6SRafael Auler report("Error while trying to open profile file for writing: "); 148116a497c6SRafael Auler report(Buf); 148216a497c6SRafael Auler reportNumber("\nFailed with error number: 0x", 148316a497c6SRafael Auler 0 - 
static_cast<int64_t>(FD), 16); 148416a497c6SRafael Auler __exit(1); 148516a497c6SRafael Auler } 148616a497c6SRafael Auler return FD; 148716a497c6SRafael Auler } 1488a0dd5b05SAlexander Shaposhnikov 1489a0dd5b05SAlexander Shaposhnikov #endif 1490a0dd5b05SAlexander Shaposhnikov 149116a497c6SRafael Auler } // anonymous namespace 149216a497c6SRafael Auler 1493a0dd5b05SAlexander Shaposhnikov #if !defined(__APPLE__) 1494a0dd5b05SAlexander Shaposhnikov 149516a497c6SRafael Auler /// Reset all counters in case you want to start profiling a new phase of your 149616a497c6SRafael Auler /// program independently of prior phases. 149716a497c6SRafael Auler /// The address of this function is printed by BOLT and this can be called by 149816a497c6SRafael Auler /// any attached debugger during runtime. There is a useful oneliner for gdb: 149916a497c6SRafael Auler /// 150016a497c6SRafael Auler /// gdb -p $(pgrep -xo PROCESSNAME) -ex 'p ((void(*)())0xdeadbeef)()' \ 150116a497c6SRafael Auler /// -ex 'set confirm off' -ex quit 150216a497c6SRafael Auler /// 150316a497c6SRafael Auler /// Where 0xdeadbeef is this function address and PROCESSNAME your binary file 150416a497c6SRafael Auler /// name. 150516a497c6SRafael Auler extern "C" void __bolt_instr_clear_counters() { 1506ea2182feSMaksim Panchenko memset(reinterpret_cast<char *>(__bolt_instr_locations), 0, 150716a497c6SRafael Auler __bolt_num_counters * 8); 1508883bf0e8SAmir Ayupov for (int I = 0; I < __bolt_instr_num_ind_calls; ++I) 150916a497c6SRafael Auler GlobalIndCallCounters[I].resetCounters(); 151016a497c6SRafael Auler } 151116a497c6SRafael Auler 151216a497c6SRafael Auler /// This is the entry point for profile writing. 
151316a497c6SRafael Auler /// There are three ways of getting here: 151416a497c6SRafael Auler /// 151516a497c6SRafael Auler /// * Program execution ended, finalization methods are running and BOLT 151616a497c6SRafael Auler /// hooked into FINI from your binary dynamic section; 151716a497c6SRafael Auler /// * You used the sleep timer option and during initialization we forked 151816a497c6SRafael Auler /// a separete process that will call this function periodically; 151916a497c6SRafael Auler /// * BOLT prints this function address so you can attach a debugger and 152016a497c6SRafael Auler /// call this function directly to get your profile written to disk 152116a497c6SRafael Auler /// on demand. 152216a497c6SRafael Auler /// 1523ad79d517SVasily Leonenko extern "C" void __attribute((force_align_arg_pointer)) 1524a7992981SDenis Revunov __bolt_instr_data_dump(int FD) { 152516a497c6SRafael Auler // Already dumping 152616a497c6SRafael Auler if (!GlobalWriteProfileMutex->acquire()) 152716a497c6SRafael Auler return; 152816a497c6SRafael Auler 1529a7992981SDenis Revunov int ret = __lseek(FD, 0, SEEK_SET); 1530a7992981SDenis Revunov assert(ret == 0, "Failed to lseek!"); 1531a7992981SDenis Revunov ret = __ftruncate(FD, 0); 1532a7992981SDenis Revunov assert(ret == 0, "Failed to ftruncate!"); 153316a497c6SRafael Auler BumpPtrAllocator HashAlloc; 153416a497c6SRafael Auler HashAlloc.setMaxSize(0x6400000); 153516a497c6SRafael Auler ProfileWriterContext Ctx = readDescriptions(); 153616a497c6SRafael Auler Ctx.CallFlowTable = new (HashAlloc, 0) CallFlowHashTable(HashAlloc); 153716a497c6SRafael Auler 153816a497c6SRafael Auler DEBUG(printStats(Ctx)); 153916a497c6SRafael Auler 1540cc4b2fb6SRafael Auler BumpPtrAllocator Alloc; 1541eaf1b566SJakub Beránek Alloc.setMaxSize(0x6400000); 154216a497c6SRafael Auler const uint8_t *FuncDesc = Ctx.FuncDescriptions; 1543cc4b2fb6SRafael Auler for (int I = 0, E = __bolt_instr_num_funcs; I < E; ++I) { 154416a497c6SRafael Auler FuncDesc = 
writeFunctionProfile(FD, Ctx, FuncDesc, Alloc); 154516a497c6SRafael Auler Alloc.clear(); 1546cc4b2fb6SRafael Auler DEBUG(reportNumber("FuncDesc now: ", (uint64_t)FuncDesc, 16)); 1547cc4b2fb6SRafael Auler } 154816a497c6SRafael Auler assert(FuncDesc == (void *)Ctx.Strings, 1549cc4b2fb6SRafael Auler "FuncDesc ptr must be equal to stringtable"); 1550cc4b2fb6SRafael Auler 155116a497c6SRafael Auler writeIndirectCallProfile(FD, Ctx); 155216a497c6SRafael Auler Ctx.CallFlowTable->forEachElement(visitCallFlowEntry, FD, &Ctx); 155316a497c6SRafael Auler 1554dcdd37fdSVladislav Khmelevsky __fsync(FD); 155516a497c6SRafael Auler __munmap(Ctx.MMapPtr, Ctx.MMapSize); 155616a497c6SRafael Auler __close(Ctx.FileDesc); 155716a497c6SRafael Auler HashAlloc.destroy(); 155816a497c6SRafael Auler GlobalWriteProfileMutex->release(); 155916a497c6SRafael Auler DEBUG(report("Finished writing profile.\n")); 156016a497c6SRafael Auler } 156116a497c6SRafael Auler 156216a497c6SRafael Auler /// Event loop for our child process spawned during setup to dump profile data 156316a497c6SRafael Auler /// at user-specified intervals 156416a497c6SRafael Auler void watchProcess() { 156516a497c6SRafael Auler timespec ts, rem; 156616a497c6SRafael Auler uint64_t Ellapsed = 0ull; 1567a7992981SDenis Revunov int FD = openProfile(); 156876d346caSVladislav Khmelevsky uint64_t ppid; 156976d346caSVladislav Khmelevsky if (__bolt_instr_wait_forks) { 157076d346caSVladislav Khmelevsky // Store parent pgid 157176d346caSVladislav Khmelevsky ppid = -__getpgid(0); 157276d346caSVladislav Khmelevsky // And leave parent process group 157376d346caSVladislav Khmelevsky __setpgid(0, 0); 157476d346caSVladislav Khmelevsky } else { 157576d346caSVladislav Khmelevsky // Store parent pid 157676d346caSVladislav Khmelevsky ppid = __getppid(); 157776d346caSVladislav Khmelevsky if (ppid == 1) { 157876d346caSVladislav Khmelevsky // Parent already dead 1579a7992981SDenis Revunov __bolt_instr_data_dump(FD); 158076d346caSVladislav Khmelevsky goto 
out; 158176d346caSVladislav Khmelevsky } 158276d346caSVladislav Khmelevsky } 158376d346caSVladislav Khmelevsky 158416a497c6SRafael Auler ts.tv_sec = 1; 158516a497c6SRafael Auler ts.tv_nsec = 0; 158616a497c6SRafael Auler while (1) { 158716a497c6SRafael Auler __nanosleep(&ts, &rem); 158876d346caSVladislav Khmelevsky // This means our parent process or all its forks are dead, 158976d346caSVladislav Khmelevsky // so no need for us to keep dumping. 159076d346caSVladislav Khmelevsky if (__kill(ppid, 0) < 0) { 159176d346caSVladislav Khmelevsky if (__bolt_instr_no_counters_clear) 1592a7992981SDenis Revunov __bolt_instr_data_dump(FD); 159316a497c6SRafael Auler break; 159416a497c6SRafael Auler } 159576d346caSVladislav Khmelevsky 159616a497c6SRafael Auler if (++Ellapsed < __bolt_instr_sleep_time) 159716a497c6SRafael Auler continue; 159876d346caSVladislav Khmelevsky 159916a497c6SRafael Auler Ellapsed = 0; 1600a7992981SDenis Revunov __bolt_instr_data_dump(FD); 160176d346caSVladislav Khmelevsky if (__bolt_instr_no_counters_clear == false) 160216a497c6SRafael Auler __bolt_instr_clear_counters(); 160316a497c6SRafael Auler } 160476d346caSVladislav Khmelevsky 160576d346caSVladislav Khmelevsky out:; 160616a497c6SRafael Auler DEBUG(report("My parent process is dead, bye!\n")); 1607a7992981SDenis Revunov __close(FD); 160816a497c6SRafael Auler __exit(0); 160916a497c6SRafael Auler } 161016a497c6SRafael Auler 161116a497c6SRafael Auler extern "C" void __bolt_instr_indirect_call(); 161216a497c6SRafael Auler extern "C" void __bolt_instr_indirect_tailcall(); 161316a497c6SRafael Auler 161416a497c6SRafael Auler /// Initialization code 1615ad79d517SVasily Leonenko extern "C" void __attribute((force_align_arg_pointer)) __bolt_instr_setup() { 161658a16d84SAmir Ayupov __bolt_ind_call_counter_func_pointer = __bolt_instr_indirect_call; 161758a16d84SAmir Ayupov __bolt_ind_tailcall_counter_func_pointer = __bolt_instr_indirect_tailcall; 1618*a86dd9aeSDenis Revunov TextBaseAddress = getTextBaseAddress(); 
161958a16d84SAmir Ayupov 162016a497c6SRafael Auler const uint64_t CountersStart = 162116a497c6SRafael Auler reinterpret_cast<uint64_t>(&__bolt_instr_locations[0]); 162216a497c6SRafael Auler const uint64_t CountersEnd = alignTo( 162316a497c6SRafael Auler reinterpret_cast<uint64_t>(&__bolt_instr_locations[__bolt_num_counters]), 162416a497c6SRafael Auler 0x1000); 162516a497c6SRafael Auler DEBUG(reportNumber("replace mmap start: ", CountersStart, 16)); 162616a497c6SRafael Auler DEBUG(reportNumber("replace mmap stop: ", CountersEnd, 16)); 162716a497c6SRafael Auler assert(CountersEnd > CountersStart, "no counters"); 162858a16d84SAmir Ayupov 162958a16d84SAmir Ayupov const bool Shared = !__bolt_instr_use_pid; 163058a16d84SAmir Ayupov const uint64_t MapPrivateOrShared = Shared ? MAP_SHARED : MAP_PRIVATE; 163158a16d84SAmir Ayupov 16328b23a853SDenis Revunov void *Ret = 16338b23a853SDenis Revunov __mmap(CountersStart, CountersEnd - CountersStart, PROT_READ | PROT_WRITE, 163458a16d84SAmir Ayupov MAP_ANONYMOUS | MapPrivateOrShared | MAP_FIXED, -1, 0); 16358ed172cfSDenis Revunov assert(Ret != MAP_FAILED, "__bolt_instr_setup: Failed to mmap counters!"); 163658a16d84SAmir Ayupov 16370cc19b56SDenis Revunov GlobalMetadataStorage = __mmap(0, 4096, PROT_READ | PROT_WRITE, 16380cc19b56SDenis Revunov MapPrivateOrShared | MAP_ANONYMOUS, -1, 0); 16390cc19b56SDenis Revunov assert(GlobalMetadataStorage != MAP_FAILED, 16400cc19b56SDenis Revunov "__bolt_instr_setup: failed to mmap page for metadata!"); 16410cc19b56SDenis Revunov 16420cc19b56SDenis Revunov GlobalAlloc = new (GlobalMetadataStorage) BumpPtrAllocator; 16430cc19b56SDenis Revunov // Conservatively reserve 100MiB 16440cc19b56SDenis Revunov GlobalAlloc->setMaxSize(0x6400000); 16450cc19b56SDenis Revunov GlobalAlloc->setShared(Shared); 16460cc19b56SDenis Revunov GlobalWriteProfileMutex = new (*GlobalAlloc, 0) Mutex(); 164716a497c6SRafael Auler if (__bolt_instr_num_ind_calls > 0) 164816a497c6SRafael Auler GlobalIndCallCounters = 
16490cc19b56SDenis Revunov new (*GlobalAlloc, 0) IndirectCallHashTable[__bolt_instr_num_ind_calls]; 165016a497c6SRafael Auler 165116a497c6SRafael Auler if (__bolt_instr_sleep_time != 0) { 165276d346caSVladislav Khmelevsky // Separate instrumented process to the own process group 165376d346caSVladislav Khmelevsky if (__bolt_instr_wait_forks) 165476d346caSVladislav Khmelevsky __setpgid(0, 0); 165576d346caSVladislav Khmelevsky 1656c7306cc2SAmir Ayupov if (long PID = __fork()) 165716a497c6SRafael Auler return; 165816a497c6SRafael Auler watchProcess(); 165916a497c6SRafael Auler } 166016a497c6SRafael Auler } 166116a497c6SRafael Auler 1662361f3b55SVladislav Khmelevsky extern "C" __attribute((force_align_arg_pointer)) void 1663361f3b55SVladislav Khmelevsky instrumentIndirectCall(uint64_t Target, uint64_t IndCallID) { 16640cc19b56SDenis Revunov GlobalIndCallCounters[IndCallID].incrementVal(Target, *GlobalAlloc); 166516a497c6SRafael Auler } 166616a497c6SRafael Auler 166716a497c6SRafael Auler /// We receive as in-stack arguments the identifier of the indirect call site 166816a497c6SRafael Auler /// as well as the target address for the call 166916a497c6SRafael Auler extern "C" __attribute((naked)) void __bolt_instr_indirect_call() 167016a497c6SRafael Auler { 167116a497c6SRafael Auler __asm__ __volatile__(SAVE_ALL 1672361f3b55SVladislav Khmelevsky "mov 0xa0(%%rsp), %%rdi\n" 1673361f3b55SVladislav Khmelevsky "mov 0x98(%%rsp), %%rsi\n" 167416a497c6SRafael Auler "call instrumentIndirectCall\n" 167516a497c6SRafael Auler RESTORE_ALL 1676361f3b55SVladislav Khmelevsky "ret\n" 167716a497c6SRafael Auler :::); 167816a497c6SRafael Auler } 167916a497c6SRafael Auler 168016a497c6SRafael Auler extern "C" __attribute((naked)) void __bolt_instr_indirect_tailcall() 168116a497c6SRafael Auler { 168216a497c6SRafael Auler __asm__ __volatile__(SAVE_ALL 1683361f3b55SVladislav Khmelevsky "mov 0x98(%%rsp), %%rdi\n" 1684361f3b55SVladislav Khmelevsky "mov 0x90(%%rsp), %%rsi\n" 168516a497c6SRafael Auler 
"call instrumentIndirectCall\n" 168616a497c6SRafael Auler RESTORE_ALL 1687361f3b55SVladislav Khmelevsky "ret\n" 168816a497c6SRafael Auler :::); 168916a497c6SRafael Auler } 169016a497c6SRafael Auler 169116a497c6SRafael Auler /// This is hooking ELF's entry, it needs to save all machine state. 169216a497c6SRafael Auler extern "C" __attribute((naked)) void __bolt_instr_start() 169316a497c6SRafael Auler { 169416a497c6SRafael Auler __asm__ __volatile__(SAVE_ALL 169516a497c6SRafael Auler "call __bolt_instr_setup\n" 169616a497c6SRafael Auler RESTORE_ALL 1697ad79d517SVasily Leonenko "jmp __bolt_start_trampoline\n" 169816a497c6SRafael Auler :::); 169916a497c6SRafael Auler } 170016a497c6SRafael Auler 170116a497c6SRafael Auler /// This is hooking into ELF's DT_FINI 170216a497c6SRafael Auler extern "C" void __bolt_instr_fini() { 1703553f28e9SVladislav Khmelevsky __bolt_fini_trampoline(); 1704a7992981SDenis Revunov if (__bolt_instr_sleep_time == 0) { 1705a7992981SDenis Revunov int FD = openProfile(); 1706a7992981SDenis Revunov __bolt_instr_data_dump(FD); 1707a7992981SDenis Revunov __close(FD); 1708a7992981SDenis Revunov } 170916a497c6SRafael Auler DEBUG(report("Finished.\n")); 171062aa74f8SRafael Auler } 1711bbd9d610SAlexander Shaposhnikov 17123b876cc3SAlexander Shaposhnikov #endif 17133b876cc3SAlexander Shaposhnikov 17143b876cc3SAlexander Shaposhnikov #if defined(__APPLE__) 1715bbd9d610SAlexander Shaposhnikov 1716a0dd5b05SAlexander Shaposhnikov extern "C" void __bolt_instr_data_dump() { 1717a0dd5b05SAlexander Shaposhnikov ProfileWriterContext Ctx = readDescriptions(); 1718a0dd5b05SAlexander Shaposhnikov 1719a0dd5b05SAlexander Shaposhnikov int FD = 2; 1720a0dd5b05SAlexander Shaposhnikov BumpPtrAllocator Alloc; 1721a0dd5b05SAlexander Shaposhnikov const uint8_t *FuncDesc = Ctx.FuncDescriptions; 1722a0dd5b05SAlexander Shaposhnikov uint32_t bolt_instr_num_funcs = _bolt_instr_num_funcs_getter(); 1723a0dd5b05SAlexander Shaposhnikov 1724a0dd5b05SAlexander Shaposhnikov for (int I = 0, 
E = bolt_instr_num_funcs; I < E; ++I) { 1725a0dd5b05SAlexander Shaposhnikov FuncDesc = writeFunctionProfile(FD, Ctx, FuncDesc, Alloc); 1726a0dd5b05SAlexander Shaposhnikov Alloc.clear(); 1727a0dd5b05SAlexander Shaposhnikov DEBUG(reportNumber("FuncDesc now: ", (uint64_t)FuncDesc, 16)); 1728a0dd5b05SAlexander Shaposhnikov } 1729a0dd5b05SAlexander Shaposhnikov assert(FuncDesc == (void *)Ctx.Strings, 1730a0dd5b05SAlexander Shaposhnikov "FuncDesc ptr must be equal to stringtable"); 1731a0dd5b05SAlexander Shaposhnikov } 1732a0dd5b05SAlexander Shaposhnikov 1733bbd9d610SAlexander Shaposhnikov // On OSX/iOS the final symbol name of an extern "C" function/variable contains 1734bbd9d610SAlexander Shaposhnikov // one extra leading underscore: _bolt_instr_setup -> __bolt_instr_setup. 17353b876cc3SAlexander Shaposhnikov extern "C" 17363b876cc3SAlexander Shaposhnikov __attribute__((section("__TEXT,__setup"))) 17373b876cc3SAlexander Shaposhnikov __attribute__((force_align_arg_pointer)) 17383b876cc3SAlexander Shaposhnikov void _bolt_instr_setup() { 1739a0dd5b05SAlexander Shaposhnikov __asm__ __volatile__(SAVE_ALL :::); 17403b876cc3SAlexander Shaposhnikov 1741a0dd5b05SAlexander Shaposhnikov report("Hello!\n"); 17423b876cc3SAlexander Shaposhnikov 1743a0dd5b05SAlexander Shaposhnikov __asm__ __volatile__(RESTORE_ALL :::); 17441cf23e5eSAlexander Shaposhnikov } 1745bbd9d610SAlexander Shaposhnikov 17463b876cc3SAlexander Shaposhnikov extern "C" 17473b876cc3SAlexander Shaposhnikov __attribute__((section("__TEXT,__fini"))) 17483b876cc3SAlexander Shaposhnikov __attribute__((force_align_arg_pointer)) 17493b876cc3SAlexander Shaposhnikov void _bolt_instr_fini() { 1750a0dd5b05SAlexander Shaposhnikov report("Bye!\n"); 1751a0dd5b05SAlexander Shaposhnikov __bolt_instr_data_dump(); 1752e067f2adSAlexander Shaposhnikov } 1753e067f2adSAlexander Shaposhnikov 1754bbd9d610SAlexander Shaposhnikov #endif 1755cb8d701bSVladislav Khmelevsky #endif 1756