//===-- xray_interface.cpp --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// Implementation of the API functions.
//
//===----------------------------------------------------------------------===//

#include "xray_interface_internal.h"

#include <cinttypes>
#include <cstdio>
#include <errno.h>
#include <limits>
#include <string.h>
#include <sys/mman.h>

#if SANITIZER_FUCHSIA
#include <zircon/process.h>
#include <zircon/sanitizer.h>
#include <zircon/status.h>
#include <zircon/syscalls.h>
#endif

#include "sanitizer_common/sanitizer_addrhashmap.h"
#include "sanitizer_common/sanitizer_common.h"

#include "xray_defs.h"
#include "xray_flags.h"

extern __sanitizer::SpinMutex XRayInstrMapMutex;
extern __sanitizer::atomic_uint8_t XRayInitialized;
extern __xray::XRaySledMap *XRayInstrMaps;
extern __sanitizer::atomic_uint32_t XRayNumObjects;

namespace __xray {

#if defined(__x86_64__)
static const int16_t cSledLength = 12;
#elif defined(__aarch64__)
static const int16_t cSledLength = 32;
#elif defined(__arm__)
static const int16_t cSledLength = 28;
#elif SANITIZER_LOONGARCH64
static const int16_t cSledLength = 48;
#elif SANITIZER_MIPS32
static const int16_t cSledLength = 48;
#elif SANITIZER_MIPS64
static const int16_t cSledLength = 64;
#elif defined(__powerpc64__)
static const int16_t cSledLength = 8;
#elif defined(__hexagon__)
static const int16_t cSledLength = 20;
#elif defined(__riscv) && (__riscv_xlen == 64)
static const int16_t cSledLength = 68;
#elif defined(__riscv) && (__riscv_xlen == 32)
static const int16_t cSledLength = 52;
#else
#error "Unsupported CPU Architecture"
#endif /* CPU architecture */

// This is the function to call when we encounter the entry or exit sleds.
atomic_uintptr_t XRayPatchedFunction SANITIZER_INTERFACE_ATTRIBUTE{0};

// This is the function to call from the arg1-enabled sleds/trampolines.
atomic_uintptr_t XRayArgLogger SANITIZER_INTERFACE_ATTRIBUTE{0};

// This is the function to call when we encounter a custom event log call.
atomic_uintptr_t XRayPatchedCustomEvent SANITIZER_INTERFACE_ATTRIBUTE{0};

// This is the function to call when we encounter a typed event log call.
atomic_uintptr_t XRayPatchedTypedEvent SANITIZER_INTERFACE_ATTRIBUTE{0};

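// Conceptually, a patched sled jumps into a trampoline that performs the
// moral equivalent of the sketch below; the real dispatch lives in the
// per-architecture assembly trampolines, and FuncId comes from the sled:
//
//   if (auto F = reinterpret_cast<void (*)(int32_t, XRayEntryType)>(
//           atomic_load(&XRayPatchedFunction, memory_order_acquire)))
//     F(FuncId, XRayEntryType::ENTRY);
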
// This is the global status to determine whether we are currently
// patching/unpatching.
atomic_uint8_t XRayPatching{0};

struct TypeDescription {
  uint32_t type_id;
  std::size_t description_string_length;
};

using TypeDescriptorMapType = AddrHashMap<TypeDescription, 11>;
// An address map from immutable descriptors to type ids.
TypeDescriptorMapType TypeDescriptorAddressMap{};

atomic_uint32_t TypeEventDescriptorCounter{0};

// MProtectHelper is an RAII wrapper around mprotect(...) that undoes any
// successful protection change when it goes out of scope. It is used to make
// a range of pages writable and executable; on destruction, if the change
// succeeded, it restores the range to read-and-execute only.
//
// This is used specifically for runtime patching of the XRay instrumentation
// points, and assumes that the executable pages are originally mapped
// read-and-execute only.
class MProtectHelper {
  void *PageAlignedAddr;
  std::size_t MProtectLen;
  bool MustCleanup;

public:
  explicit MProtectHelper(void *PageAlignedAddr,
                          std::size_t MProtectLen,
                          std::size_t PageSize) XRAY_NEVER_INSTRUMENT
      : PageAlignedAddr(PageAlignedAddr),
        MProtectLen(MProtectLen),
        MustCleanup(false) {
#if SANITIZER_FUCHSIA
    MProtectLen = RoundUpTo(MProtectLen, PageSize);
#endif
  }

  int MakeWriteable() XRAY_NEVER_INSTRUMENT {
#if SANITIZER_FUCHSIA
    auto R = __sanitizer_change_code_protection(
        reinterpret_cast<uintptr_t>(PageAlignedAddr), MProtectLen, true);
    if (R != ZX_OK) {
      Report("XRay: cannot change code protection: %s\n",
             _zx_status_get_string(R));
      return -1;
    }
    MustCleanup = true;
    return 0;
#else
    auto R = mprotect(PageAlignedAddr, MProtectLen,
                      PROT_READ | PROT_WRITE | PROT_EXEC);
    if (R != -1)
      MustCleanup = true;
    return R;
#endif
  }

  ~MProtectHelper() XRAY_NEVER_INSTRUMENT {
    if (MustCleanup) {
#if SANITIZER_FUCHSIA
      auto R = __sanitizer_change_code_protection(
          reinterpret_cast<uintptr_t>(PageAlignedAddr), MProtectLen, false);
      if (R != ZX_OK) {
        Report("XRay: cannot change code protection: %s\n",
               _zx_status_get_string(R));
      }
#else
      mprotect(PageAlignedAddr, MProtectLen, PROT_READ | PROT_EXEC);
#endif
    }
  }
};
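
// A minimal usage sketch (hypothetical values; this mirrors how the patching
// routines below drive the helper):
//
//   MProtectHelper Protector(PageAlignedAddr, MProtectLen, PageSize);
//   if (Protector.MakeWriteable() == -1)
//     return XRayPatchingStatus::FAILED; // pages stay read-and-execute only
//   // ... rewrite the sleds in place; ~MProtectHelper() restores
//   // PROT_READ | PROT_EXEC on scope exit.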

namespace {

bool isObjectLoaded(int32_t ObjId) {
  SpinMutexLock Guard(&XRayInstrMapMutex);
  if (ObjId < 0 || static_cast<uint32_t>(ObjId) >=
                       atomic_load(&XRayNumObjects, memory_order_acquire)) {
    return false;
  }
  return XRayInstrMaps[ObjId].Loaded;
}

bool patchSled(const XRaySledEntry &Sled, bool Enable, int32_t FuncId,
               const XRayTrampolines &Trampolines) XRAY_NEVER_INSTRUMENT {
  bool Success = false;
  switch (Sled.Kind) {
  case XRayEntryType::ENTRY:
    Success = patchFunctionEntry(Enable, FuncId, Sled, Trampolines,
                                 /*LogArgs=*/false);
    break;
  case XRayEntryType::EXIT:
    Success = patchFunctionExit(Enable, FuncId, Sled, Trampolines);
    break;
  case XRayEntryType::TAIL:
    Success = patchFunctionTailExit(Enable, FuncId, Sled, Trampolines);
    break;
  case XRayEntryType::LOG_ARGS_ENTRY:
    Success = patchFunctionEntry(Enable, FuncId, Sled, Trampolines,
                                 /*LogArgs=*/true);
    break;
  case XRayEntryType::CUSTOM_EVENT:
    Success = patchCustomEvent(Enable, FuncId, Sled);
    break;
  case XRayEntryType::TYPED_EVENT:
    Success = patchTypedEvent(Enable, FuncId, Sled);
    break;
  default:
    Report("Unsupported sled kind '%d' @%" PRIu64 "\n", int(Sled.Kind),
           Sled.Address);
    return false;
  }
  return Success;
}

const XRayFunctionSledIndex
findFunctionSleds(int32_t FuncId,
                  const XRaySledMap &InstrMap) XRAY_NEVER_INSTRUMENT {
  int32_t CurFn = 0;
  uint64_t LastFnAddr = 0;
  XRayFunctionSledIndex Index = {nullptr, 0};

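  // Scan the sleds in order; each change in Sled.function() starts the next
  // function id. For instance (hypothetical layout), with sleds for function
  // addresses A, A, B, B, B, C, C, FuncId 2 selects the run of three
  // consecutive sleds whose function() is B.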
  for (std::size_t I = 0; I < InstrMap.Entries && CurFn <= FuncId; I++) {
    const auto &Sled = InstrMap.Sleds[I];
    const auto Function = Sled.function();
    if (Function != LastFnAddr) {
      CurFn++;
      LastFnAddr = Function;
    }

    if (CurFn == FuncId) {
      if (Index.Begin == nullptr)
        Index.Begin = &Sled;
      Index.Size = &Sled - Index.Begin + 1;
    }
  }

  return Index;
}

XRayPatchingStatus patchFunction(int32_t FuncId, int32_t ObjId,
                                 bool Enable) XRAY_NEVER_INSTRUMENT {
  if (!atomic_load(&XRayInitialized, memory_order_acquire))
    return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.

  uint8_t NotPatching = false;
  if (!atomic_compare_exchange_strong(&XRayPatching, &NotPatching, true,
                                      memory_order_acq_rel))
    return XRayPatchingStatus::ONGOING; // Already patching.

  // Reset the patching flag on every exit path; the early returns below
  // would otherwise leave it set and block all future patching attempts.
  auto XRayPatchingStatusResetter = at_scope_exit(
      [] { atomic_store(&XRayPatching, false, memory_order_release); });

  // Next, we look for the function index.
  XRaySledMap InstrMap;
  {
    SpinMutexLock Guard(&XRayInstrMapMutex);
    if (ObjId < 0 || static_cast<uint32_t>(ObjId) >=
                         atomic_load(&XRayNumObjects, memory_order_acquire)) {
      Report("Unable to patch function: invalid sled map index: %d\n", ObjId);
      return XRayPatchingStatus::FAILED;
    }
    InstrMap = XRayInstrMaps[ObjId];
  }

  // If we don't have an index, we can't patch individual functions.
  if (InstrMap.Functions == 0)
    return XRayPatchingStatus::NOT_INITIALIZED;

  // Check if the corresponding DSO has been unloaded.
  if (!InstrMap.Loaded) {
    Report("Object is not loaded at index: %d\n", ObjId);
    return XRayPatchingStatus::NOT_INITIALIZED;
  }

  // FuncId must be a positive number, no greater than the number of functions
  // instrumented.
  if (FuncId <= 0 || static_cast<size_t>(FuncId) > InstrMap.Functions) {
    Report("Invalid function id provided: %d\n", FuncId);
    return XRayPatchingStatus::FAILED;
  }

  auto PackedId = __xray::MakePackedId(FuncId, ObjId);

  // Now we patch the sleds for this specific function.
  XRayFunctionSledIndex SledRange;
  if (InstrMap.SledsIndex) {
    SledRange = {InstrMap.SledsIndex[FuncId - 1].fromPCRelative(),
                 InstrMap.SledsIndex[FuncId - 1].Size};
  } else {
    SledRange = findFunctionSleds(FuncId, InstrMap);
  }

  auto *f = SledRange.Begin;
  bool SucceedOnce = false;
  for (size_t i = 0; i != SledRange.Size; ++i)
    SucceedOnce |= patchSled(f[i], Enable, PackedId, InstrMap.Trampolines);

  if (!SucceedOnce) {
    Report("Failed patching any sled for function '%d'.\n", FuncId);
    return XRayPatchingStatus::FAILED;
  }

  return XRayPatchingStatus::SUCCESS;
}

// controlPatchingObjectUnchecked implements the common internals of the
// patching/unpatching implementation. |Enable| defines whether we're enabling
// or disabling the runtime XRay instrumentation.
// This function should only be called after ensuring that XRay is initialized
// and no other thread is currently patching.
XRayPatchingStatus controlPatchingObjectUnchecked(bool Enable, int32_t ObjId) {
  XRaySledMap InstrMap;
  {
    SpinMutexLock Guard(&XRayInstrMapMutex);
    if (ObjId < 0 || static_cast<uint32_t>(ObjId) >=
                         atomic_load(&XRayNumObjects, memory_order_acquire)) {
      Report("Unable to patch functions: invalid sled map index: %d\n", ObjId);
      return XRayPatchingStatus::FAILED;
    }
    InstrMap = XRayInstrMaps[ObjId];
  }
  if (InstrMap.Entries == 0)
    return XRayPatchingStatus::NOT_INITIALIZED;

  if (Verbosity())
    Report("Patching object %d with %zu sleds.\n", ObjId, InstrMap.Entries);

  // Check if the corresponding DSO has been unloaded.
  if (!InstrMap.Loaded) {
    Report("Object is not loaded at index: %d\n", ObjId);
    return XRayPatchingStatus::FAILED;
  }

  uint32_t FuncId = 1;
  uint64_t CurFun = 0;

  // First we want to find the bounds for which we have instrumentation points,
  // and try to get as few calls to mprotect(...) as possible. We're assuming
  // that all the sleds for the instrumentation map are contiguous as a single
  // set of pages. When we do support dynamic shared object instrumentation,
  // we'll need to do this for each set of page load offsets per DSO loaded. For
  // now we're assuming we can mprotect the whole section of text between the
  // minimum sled address and the maximum sled address (+ the largest sled
  // size).
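  //
  // For example (hypothetical addresses), with 4 KiB pages,
  // MinSled->address() == 0x40001234 and MaxSled->address() == 0x40005678:
  //   PageAlignedAddr = 0x40001234 & ~0xfff == 0x40001000
  //   MProtectLen     = (0x40005678 - 0x40001000) + cSledLength
  //                   = 0x4678 + cSledLength bytes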
  auto *MinSled = &InstrMap.Sleds[0];
  auto *MaxSled = &InstrMap.Sleds[InstrMap.Entries - 1];
  for (std::size_t I = 0; I < InstrMap.Entries; I++) {
    const auto &Sled = InstrMap.Sleds[I];
    if (Sled.address() < MinSled->address())
      MinSled = &Sled;
    if (Sled.address() > MaxSled->address())
      MaxSled = &Sled;
  }

  const size_t PageSize = flags()->xray_page_size_override > 0
                              ? flags()->xray_page_size_override
                              : GetPageSizeCached();
  if ((PageSize == 0) || ((PageSize & (PageSize - 1)) != 0)) {
    Report("System page size is not a power of two: %zu\n", PageSize);
    return XRayPatchingStatus::FAILED;
  }

  void *PageAlignedAddr =
      reinterpret_cast<void *>(MinSled->address() & ~(PageSize - 1));
  size_t MProtectLen =
      (MaxSled->address() - reinterpret_cast<uptr>(PageAlignedAddr)) +
      cSledLength;
  MProtectHelper Protector(PageAlignedAddr, MProtectLen, PageSize);
  if (Protector.MakeWriteable() == -1) {
    Report("Failed mprotect: %d\n", errno);
    return XRayPatchingStatus::FAILED;
  }

  for (std::size_t I = 0; I < InstrMap.Entries; ++I) {
    auto &Sled = InstrMap.Sleds[I];
    auto F = Sled.function();
    if (CurFun == 0)
      CurFun = F;
    if (F != CurFun) {
      ++FuncId;
      CurFun = F;
    }
    auto PackedId = __xray::MakePackedId(FuncId, ObjId);
    patchSled(Sled, Enable, PackedId, InstrMap.Trampolines);
  }
  // The caller owns the XRayPatching flag and releases it via its scope-exit
  // resetter; do not release it here, or a concurrent caller could start
  // patching while the all-objects loop is still running.
  return XRayPatchingStatus::SUCCESS;
}

// Controls patching for all registered objects.
// Returns: SUCCESS, if patching succeeds for all objects.
//          NOT_INITIALIZED, if one or more objects returned NOT_INITIALIZED
//             but none failed.
//          FAILED, if patching of one or more objects failed.
XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
  if (!atomic_load(&XRayInitialized, memory_order_acquire))
    return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.

  uint8_t NotPatching = false;
  if (!atomic_compare_exchange_strong(&XRayPatching, &NotPatching, true,
                                      memory_order_acq_rel))
    return XRayPatchingStatus::ONGOING; // Already patching.

  auto XRayPatchingStatusResetter = at_scope_exit(
      [] { atomic_store(&XRayPatching, false, memory_order_release); });

  unsigned NumObjects = __xray_num_objects();

  XRayPatchingStatus CombinedStatus{NOT_INITIALIZED};
  for (unsigned I = 0; I < NumObjects; ++I) {
    if (!isObjectLoaded(I))
      continue;
    auto LastStatus = controlPatchingObjectUnchecked(Enable, I);
    switch (LastStatus) {
    case SUCCESS:
      if (CombinedStatus == NOT_INITIALIZED)
        CombinedStatus = SUCCESS;
      break;
    case FAILED:
      // Report failure, but try to patch the remaining objects
      CombinedStatus = FAILED;
      break;
    case NOT_INITIALIZED:
      // XRay has been initialized but there are no sleds available for this
      // object. Try to patch remaining objects.
      if (CombinedStatus != FAILED)
        CombinedStatus = NOT_INITIALIZED;
      break;
    case ONGOING:
      UNREACHABLE("Status ONGOING should not appear at this point");
    }
  }
  return CombinedStatus;
}

// Controls patching for one object.
XRayPatchingStatus controlPatching(bool Enable,
                                   int32_t ObjId) XRAY_NEVER_INSTRUMENT {
  if (!atomic_load(&XRayInitialized, memory_order_acquire))
    return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.

  uint8_t NotPatching = false;
  if (!atomic_compare_exchange_strong(&XRayPatching, &NotPatching, true,
                                      memory_order_acq_rel))
    return XRayPatchingStatus::ONGOING; // Already patching.

  auto XRayPatchingStatusResetter = at_scope_exit(
      [] { atomic_store(&XRayPatching, false, memory_order_release); });

  return controlPatchingObjectUnchecked(Enable, ObjId);
}

XRayPatchingStatus mprotectAndPatchFunction(int32_t FuncId, int32_t ObjId,
                                            bool Enable) XRAY_NEVER_INSTRUMENT {
  XRaySledMap InstrMap;
  {
    SpinMutexLock Guard(&XRayInstrMapMutex);
    if (ObjId < 0 || static_cast<uint32_t>(ObjId) >=
                         atomic_load(&XRayNumObjects, memory_order_acquire)) {
      Report("Unable to patch function: invalid sled map index: %d\n", ObjId);
      return XRayPatchingStatus::FAILED;
    }
    InstrMap = XRayInstrMaps[ObjId];
  }

  // Check if the corresponding DSO has been unloaded.
  if (!InstrMap.Loaded) {
    Report("Object is not loaded at index: %d\n", ObjId);
    return XRayPatchingStatus::FAILED;
  }

  // FuncId must be a positive number, no greater than the number of functions
  // instrumented.
  if (FuncId <= 0 || static_cast<size_t>(FuncId) > InstrMap.Functions) {
    Report("Invalid function id provided: %d\n", FuncId);
    return XRayPatchingStatus::FAILED;
  }

  const size_t PageSize = flags()->xray_page_size_override > 0
                              ? flags()->xray_page_size_override
                              : GetPageSizeCached();
  if ((PageSize == 0) || ((PageSize & (PageSize - 1)) != 0)) {
    Report("Provided page size is not a power of two: %zu\n", PageSize);
    return XRayPatchingStatus::FAILED;
  }

  // Here we compute the minimum sled and maximum sled associated with a
  // particular function ID.
  XRayFunctionSledIndex SledRange;
  if (InstrMap.SledsIndex) {
    SledRange = {InstrMap.SledsIndex[FuncId - 1].fromPCRelative(),
                 InstrMap.SledsIndex[FuncId - 1].Size};
  } else {
    SledRange = findFunctionSleds(FuncId, InstrMap);
  }
  auto *f = SledRange.Begin;
  auto *e = SledRange.Begin + SledRange.Size;
  auto *MinSled = f;
  auto *MaxSled = e - 1;
  while (f != e) {
    if (f->address() < MinSled->address())
      MinSled = f;
    if (f->address() > MaxSled->address())
      MaxSled = f;
    ++f;
  }

  void *PageAlignedAddr =
      reinterpret_cast<void *>(MinSled->address() & ~(PageSize - 1));
  size_t MProtectLen =
      (MaxSled->address() - reinterpret_cast<uptr>(PageAlignedAddr)) +
      cSledLength;
  MProtectHelper Protector(PageAlignedAddr, MProtectLen, PageSize);
  if (Protector.MakeWriteable() == -1) {
    Report("Failed mprotect: %d\n", errno);
    return XRayPatchingStatus::FAILED;
  }
  return patchFunction(FuncId, ObjId, Enable);
}

} // namespace

} // namespace __xray

using namespace __xray;

// The following functions are declared `extern "C" {...}` in the header, hence
// they're defined in the global namespace.

int __xray_set_handler(void (*entry)(int32_t,
                                     XRayEntryType)) XRAY_NEVER_INSTRUMENT {
  if (atomic_load(&XRayInitialized, memory_order_acquire)) {
    atomic_store(&__xray::XRayPatchedFunction,
                 reinterpret_cast<uintptr_t>(entry), memory_order_release);
    return 1;
  }
  return 0;
}
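
// A minimal usage sketch (hypothetical handler; the declarations come from
// the public xray_interface.h header):
//
//   void MyHandler(int32_t FuncId, XRayEntryType Type) {
//     // Record FuncId/Type; keep this cheap and reentrancy-safe.
//   }
//   ...
//   __xray_set_handler(MyHandler); // returns 1 once XRay is initialized
//   __xray_patch();                // rewrite sleds to call the trampolines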

int __xray_set_customevent_handler(void (*entry)(void *, size_t))
    XRAY_NEVER_INSTRUMENT {
  if (atomic_load(&XRayInitialized, memory_order_acquire)) {
    atomic_store(&__xray::XRayPatchedCustomEvent,
                 reinterpret_cast<uintptr_t>(entry), memory_order_release);
    return 1;
  }
  return 0;
}

int __xray_set_typedevent_handler(void (*entry)(size_t, const void *,
                                                size_t)) XRAY_NEVER_INSTRUMENT {
  if (atomic_load(&XRayInitialized, memory_order_acquire)) {
    atomic_store(&__xray::XRayPatchedTypedEvent,
                 reinterpret_cast<uintptr_t>(entry), memory_order_release);
    return 1;
  }
  return 0;
}

int __xray_remove_handler() XRAY_NEVER_INSTRUMENT {
  return __xray_set_handler(nullptr);
}

int __xray_remove_customevent_handler() XRAY_NEVER_INSTRUMENT {
  return __xray_set_customevent_handler(nullptr);
}

int __xray_remove_typedevent_handler() XRAY_NEVER_INSTRUMENT {
  return __xray_set_typedevent_handler(nullptr);
}

uint16_t __xray_register_event_type(
    const char *const event_type) XRAY_NEVER_INSTRUMENT {
  TypeDescriptorMapType::Handle h(&TypeDescriptorAddressMap, (uptr)event_type);
  if (h.created()) {
    h->type_id = atomic_fetch_add(
        &TypeEventDescriptorCounter, 1, memory_order_acq_rel);
    h->description_string_length = strnlen(event_type, 1024);
  }
  return h->type_id;
}
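
// A minimal usage sketch (hypothetical event name). The map is keyed by the
// descriptor's address, so the string must stay immutable and live for the
// process lifetime:
//
//   static const char kDesc[] = "my_subsystem.cache_miss";
//   uint16_t TypeId = __xray_register_event_type(kDesc);
//   // TypeId is then passed as the first argument of typed-event calls.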

XRayPatchingStatus __xray_patch() XRAY_NEVER_INSTRUMENT {
  return controlPatching(true);
}

XRayPatchingStatus __xray_patch_object(int32_t ObjId) XRAY_NEVER_INSTRUMENT {
  return controlPatching(true, ObjId);
}

XRayPatchingStatus __xray_unpatch() XRAY_NEVER_INSTRUMENT {
  return controlPatching(false);
}

XRayPatchingStatus __xray_unpatch_object(int32_t ObjId) XRAY_NEVER_INSTRUMENT {
  return controlPatching(false, ObjId);
}

XRayPatchingStatus __xray_patch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
  auto Ids = __xray::UnpackId(FuncId);
  auto ObjId = Ids.first;
  auto FnId = Ids.second;
  return mprotectAndPatchFunction(FnId, ObjId, true);
}

XRayPatchingStatus
__xray_patch_function_in_object(int32_t FuncId,
                                int32_t ObjId) XRAY_NEVER_INSTRUMENT {
  return mprotectAndPatchFunction(FuncId, ObjId, true);
}

XRayPatchingStatus
__xray_unpatch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
  auto Ids = __xray::UnpackId(FuncId);
  auto ObjId = Ids.first;
  auto FnId = Ids.second;
  return mprotectAndPatchFunction(FnId, ObjId, false);
}

XRayPatchingStatus
__xray_unpatch_function_in_object(int32_t FuncId,
                                  int32_t ObjId) XRAY_NEVER_INSTRUMENT {
  return mprotectAndPatchFunction(FuncId, ObjId, false);
}

int __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType, uint64_t)) {
  if (!atomic_load(&XRayInitialized, memory_order_acquire))
    return 0;

  // A relaxed write might not be visible to a thread that is meanwhile
  // scheduled on a different CPU/NUMA node. Use a release store so every
  // thread observes the installed handler, keeping the collected data
  // consistent across CPUs.
  atomic_store(&XRayArgLogger, reinterpret_cast<uintptr_t>(entry),
               memory_order_release);
  return 1;
}

int __xray_remove_handler_arg1() { return __xray_set_handler_arg1(nullptr); }

uintptr_t
__xray_function_address(int32_t CombinedFuncId) XRAY_NEVER_INSTRUMENT {
  auto Ids = __xray::UnpackId(CombinedFuncId);
  return __xray_function_address_in_object(Ids.second, Ids.first);
}

uintptr_t __xray_function_address_in_object(int32_t FuncId, int32_t ObjId)
    XRAY_NEVER_INSTRUMENT {
  XRaySledMap InstrMap;
  {
    SpinMutexLock Guard(&XRayInstrMapMutex);
    auto count = atomic_load(&XRayNumObjects, memory_order_acquire);
    if (ObjId < 0 || static_cast<uint32_t>(ObjId) >= count) {
      Report("Unable to determine function address: invalid sled map index %d "
             "(size is %d)\n",
             ObjId, (int)count);
      return 0;
    }
    InstrMap = XRayInstrMaps[ObjId];
  }

  if (FuncId <= 0 || static_cast<size_t>(FuncId) > InstrMap.Functions)
    return 0;
  const XRaySledEntry *Sled =
      InstrMap.SledsIndex ? InstrMap.SledsIndex[FuncId - 1].fromPCRelative()
                          : findFunctionSleds(FuncId, InstrMap).Begin;
  return Sled->function()
// On PPC, function entries are always aligned to 16 bytes. The beginning of a
// sled might be a local entry, which always sits 8 bytes past the global
// entry. Always return the global entry.
#ifdef __PPC__
         & ~0xf
#endif
      ;
}
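
// E.g. (PPC, hypothetical addresses): a global entry at 0x10001230 has its
// local entry at 0x10001238; masking with ~0xf maps either back to
// 0x10001230.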

size_t __xray_max_function_id() XRAY_NEVER_INSTRUMENT {
  return __xray_max_function_id_in_object(0);
}

size_t __xray_max_function_id_in_object(int32_t ObjId) XRAY_NEVER_INSTRUMENT {
  SpinMutexLock Guard(&XRayInstrMapMutex);
  if (ObjId < 0 || static_cast<uint32_t>(ObjId) >=
                       atomic_load(&XRayNumObjects, memory_order_acquire))
    return 0;
  return XRayInstrMaps[ObjId].Functions;
}

size_t __xray_num_objects() XRAY_NEVER_INSTRUMENT {
  SpinMutexLock Guard(&XRayInstrMapMutex);
  return atomic_load(&XRayNumObjects, memory_order_acquire);
}

int32_t __xray_unpack_function_id(int32_t PackedId) {
  return __xray::UnpackId(PackedId).second;
}

int32_t __xray_unpack_object_id(int32_t PackedId) {
  return __xray::UnpackId(PackedId).first;
}

int32_t __xray_pack_id(int32_t FuncId, int32_t ObjId) {
  return __xray::MakePackedId(FuncId, ObjId);
}
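
// A round-trip sketch (assuming FuncId and ObjId fit the packed encoding that
// MakePackedId/UnpackId implement):
//
//   int32_t Packed = __xray_pack_id(FuncId, ObjId);
//   // __xray_unpack_function_id(Packed) == FuncId
//   // __xray_unpack_object_id(Packed) == ObjId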