//===-- xray_interface.cpp --------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// Implementation of the API functions.
//
//===----------------------------------------------------------------------===//

#include "xray_interface_internal.h"

#include <cstdint>
#include <cstdio>
#include <errno.h>
#include <limits>
#include <string.h>
#include <sys/mman.h>

#if SANITIZER_FUCHSIA
#include <zircon/process.h>
#include <zircon/sanitizer.h>
#include <zircon/status.h>
#include <zircon/syscalls.h>
#endif

#include "sanitizer_common/sanitizer_addrhashmap.h"
#include "sanitizer_common/sanitizer_common.h"

#include "xray_defs.h"
#include "xray_flags.h"

extern __sanitizer::SpinMutex XRayInstrMapMutex;
extern __sanitizer::atomic_uint8_t XRayInitialized;
extern __xray::XRaySledMap XRayInstrMap;

namespace __xray {

#if defined(__x86_64__)
static const int16_t cSledLength = 12;
#elif defined(__aarch64__)
static const int16_t cSledLength = 32;
#elif defined(__arm__)
static const int16_t cSledLength = 28;
#elif SANITIZER_MIPS32
static const int16_t cSledLength = 48;
#elif SANITIZER_MIPS64
static const int16_t cSledLength = 64;
#elif defined(__powerpc64__)
static const int16_t cSledLength = 8;
#else
#error "Unsupported CPU Architecture"
#endif /* CPU architecture */
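// cSledLength is the size in bytes of a single XRay instrumentation sled for
// the target architecture. It is only used below to extend the mprotect(...)
// range so that the last sled in a range is covered in full.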

// This is the function to call when we encounter the entry or exit sleds.
atomic_uintptr_t XRayPatchedFunction{0};

// This is the function to call from the arg1-enabled sleds/trampolines.
atomic_uintptr_t XRayArgLogger{0};

// This is the function to call when we encounter a custom event log call.
atomic_uintptr_t XRayPatchedCustomEvent{0};

// This is the function to call when we encounter a typed event log call.
atomic_uintptr_t XRayPatchedTypedEvent{0};

// This is the global status to determine whether we are currently
// patching/unpatching.
atomic_uint8_t XRayPatching{0};
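// Both patchFunction() and controlPatching() below take this flag with a
// compare-and-exchange, so only one thread patches or unpatches at a time.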

struct TypeDescription {
  uint32_t type_id;
  std::size_t description_string_length;
};

using TypeDescriptorMapType = AddrHashMap<TypeDescription, 11>;
// An address map from immutable descriptors to type ids.
TypeDescriptorMapType TypeDescriptorAddressMap{};

atomic_uint32_t TypeEventDescriptorCounter{0};
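// Monotonically increasing source of fresh type ids, consumed by
// __xray_register_event_type() below.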

// MProtectHelper is an RAII wrapper for calls to mprotect(...) that will
// undo any successful mprotect(...) changes. This is used to make a page
// writeable and executable, and upon destruction if it was successful in
// doing so returns the page into a read-only and executable page.
//
// This is only used specifically for runtime-patching of the XRay
// instrumentation points. This assumes that the executable pages are
// originally read-and-execute only.
class MProtectHelper {
  void *PageAlignedAddr;
  std::size_t MProtectLen;
  bool MustCleanup;

public:
  explicit MProtectHelper(void *PageAlignedAddr,
                          std::size_t MProtectLen,
                          std::size_t PageSize) XRAY_NEVER_INSTRUMENT
      : PageAlignedAddr(PageAlignedAddr),
        MProtectLen(MProtectLen),
        MustCleanup(false) {
#if SANITIZER_FUCHSIA
    MProtectLen = RoundUpTo(MProtectLen, PageSize);
#endif
  }

  int MakeWriteable() XRAY_NEVER_INSTRUMENT {
#if SANITIZER_FUCHSIA
    auto R = __sanitizer_change_code_protection(
        reinterpret_cast<uintptr_t>(PageAlignedAddr), MProtectLen, true);
    if (R != ZX_OK) {
      Report("XRay: cannot change code protection: %s\n",
             _zx_status_get_string(R));
      return -1;
    }
    MustCleanup = true;
    return 0;
#else
    auto R = mprotect(PageAlignedAddr, MProtectLen,
                      PROT_READ | PROT_WRITE | PROT_EXEC);
    if (R != -1)
      MustCleanup = true;
    return R;
#endif
  }

  ~MProtectHelper() XRAY_NEVER_INSTRUMENT {
    if (MustCleanup) {
#if SANITIZER_FUCHSIA
      auto R = __sanitizer_change_code_protection(
          reinterpret_cast<uintptr_t>(PageAlignedAddr), MProtectLen, false);
      if (R != ZX_OK) {
        Report("XRay: cannot change code protection: %s\n",
               _zx_status_get_string(R));
      }
#else
      mprotect(PageAlignedAddr, MProtectLen, PROT_READ | PROT_EXEC);
#endif
    }
  }
};
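
// Typical usage, as in controlPatching() and mprotectAndPatchFunction() below
// (sketch only; the real call sites also compute PageAlignedAddr and
// MProtectLen from the sled addresses):
//
//   MProtectHelper Protector(PageAlignedAddr, MProtectLen, PageSize);
//   if (Protector.MakeWriteable() == -1)
//     return XRayPatchingStatus::FAILED;
//   // ... patch the sleds; the destructor restores read+exec protection.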

namespace {

bool patchSled(const XRaySledEntry &Sled, bool Enable,
               int32_t FuncId) XRAY_NEVER_INSTRUMENT {
  bool Success = false;
  switch (Sled.Kind) {
  case XRayEntryType::ENTRY:
    Success = patchFunctionEntry(Enable, FuncId, Sled, __xray_FunctionEntry);
    break;
  case XRayEntryType::EXIT:
    Success = patchFunctionExit(Enable, FuncId, Sled);
    break;
  case XRayEntryType::TAIL:
    Success = patchFunctionTailExit(Enable, FuncId, Sled);
    break;
  case XRayEntryType::LOG_ARGS_ENTRY:
    Success = patchFunctionEntry(Enable, FuncId, Sled, __xray_ArgLoggerEntry);
    break;
  case XRayEntryType::CUSTOM_EVENT:
    Success = patchCustomEvent(Enable, FuncId, Sled);
    break;
  case XRayEntryType::TYPED_EVENT:
    Success = patchTypedEvent(Enable, FuncId, Sled);
    break;
  default:
    Report("Unsupported sled kind '%d' @%llx\n", int(Sled.Kind),
           static_cast<unsigned long long>(Sled.Address));
    return false;
  }
  return Success;
}

XRayPatchingStatus patchFunction(int32_t FuncId,
                                 bool Enable) XRAY_NEVER_INSTRUMENT {
  if (!atomic_load(&XRayInitialized, memory_order_acquire))
    return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.

  uint8_t NotPatching = false;
  if (!atomic_compare_exchange_strong(&XRayPatching, &NotPatching, true,
                                      memory_order_acq_rel))
    return XRayPatchingStatus::ONGOING; // Already patching.

  // Next, we look for the function index.
  XRaySledMap InstrMap;
  {
    SpinMutexLock Guard(&XRayInstrMapMutex);
    InstrMap = XRayInstrMap;
  }

  // If we don't have an index, we can't patch individual functions.
  if (InstrMap.Functions == 0)
    return XRayPatchingStatus::NOT_INITIALIZED;

  // FuncId must be a positive number, less than the number of functions
  // instrumented.
  if (FuncId <= 0 || static_cast<size_t>(FuncId) > InstrMap.Functions) {
    Report("Invalid function id provided: %d\n", FuncId);
    return XRayPatchingStatus::FAILED;
  }
  // Now we patch the sleds for this specific function.
  auto SledRange = InstrMap.SledsIndex[FuncId - 1];
  auto *f = SledRange.Begin;
  auto *e = SledRange.End;

  bool SucceedOnce = false;
  while (f != e)
    SucceedOnce |= patchSled(*f++, Enable, FuncId);

  atomic_store(&XRayPatching, false, memory_order_release);

  if (!SucceedOnce) {
    Report("Failed patching any sled for function '%d'.", FuncId);
    return XRayPatchingStatus::FAILED;
  }

  return XRayPatchingStatus::SUCCESS;
}

// controlPatching implements the common internals of the patching/unpatching
// implementation. |Enable| defines whether we're enabling or disabling the
// runtime XRay instrumentation.
XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
  if (!atomic_load(&XRayInitialized, memory_order_acquire))
    return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.

  uint8_t NotPatching = false;
  if (!atomic_compare_exchange_strong(&XRayPatching, &NotPatching, true,
                                      memory_order_acq_rel))
    return XRayPatchingStatus::ONGOING; // Already patching.

  uint8_t PatchingSuccess = false;
  auto XRayPatchingStatusResetter = at_scope_exit([&PatchingSuccess] {
    if (!PatchingSuccess)
      atomic_store(&XRayPatching, false, memory_order_release);
  });

  XRaySledMap InstrMap;
  {
    SpinMutexLock Guard(&XRayInstrMapMutex);
    InstrMap = XRayInstrMap;
  }
  if (InstrMap.Entries == 0)
    return XRayPatchingStatus::NOT_INITIALIZED;

  uint32_t FuncId = 1;
  uint64_t CurFun = 0;

  // First we want to find the bounds for which we have instrumentation points,
  // and try to get as few calls to mprotect(...) as possible. We're assuming
  // that all the sleds for the instrumentation map are contiguous as a single
  // set of pages. When we do support dynamic shared object instrumentation,
  // we'll need to do this for each set of page load offsets per DSO loaded. For
  // now we're assuming we can mprotect the whole section of text between the
  // minimum sled address and the maximum sled address (+ the largest sled
  // size).
  auto MinSled = InstrMap.Sleds[0];
  auto MaxSled = InstrMap.Sleds[InstrMap.Entries - 1];
  for (std::size_t I = 0; I < InstrMap.Entries; I++) {
    const auto &Sled = InstrMap.Sleds[I];
    if (Sled.Address < MinSled.Address)
      MinSled = Sled;
    if (Sled.Address > MaxSled.Address)
      MaxSled = Sled;
  }

  const size_t PageSize = flags()->xray_page_size_override > 0
                              ? flags()->xray_page_size_override
                              : GetPageSizeCached();
  if ((PageSize == 0) || ((PageSize & (PageSize - 1)) != 0)) {
    Report("System page size is not a power of two: %zu\n", PageSize);
    return XRayPatchingStatus::FAILED;
  }

  void *PageAlignedAddr =
      reinterpret_cast<void *>(MinSled.Address & ~(PageSize - 1));
  size_t MProtectLen =
      (MaxSled.Address - reinterpret_cast<uptr>(PageAlignedAddr)) + cSledLength;
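  // For example (hypothetical addresses, 4KiB pages): if MinSled.Address is
  // 0x401a2c and MaxSled.Address is 0x403f10, then PageAlignedAddr is
  // 0x401000 and MProtectLen is 0x2f10 + cSledLength, covering every sled
  // with a single mprotect(...) call.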
  MProtectHelper Protector(PageAlignedAddr, MProtectLen, PageSize);
  if (Protector.MakeWriteable() == -1) {
    Report("Failed mprotect: %d\n", errno);
    return XRayPatchingStatus::FAILED;
  }

  for (std::size_t I = 0; I < InstrMap.Entries; ++I) {
    auto &Sled = InstrMap.Sleds[I];
    auto F = Sled.Function;
    if (CurFun == 0)
      CurFun = F;
    if (F != CurFun) {
      ++FuncId;
      CurFun = F;
    }
    patchSled(Sled, Enable, FuncId);
  }
  atomic_store(&XRayPatching, false, memory_order_release);
  PatchingSuccess = true;
  return XRayPatchingStatus::SUCCESS;
}

XRayPatchingStatus mprotectAndPatchFunction(int32_t FuncId,
                                            bool Enable) XRAY_NEVER_INSTRUMENT {
  XRaySledMap InstrMap;
  {
    SpinMutexLock Guard(&XRayInstrMapMutex);
    InstrMap = XRayInstrMap;
  }

  // FuncId must be a positive number, less than the number of functions
  // instrumented.
  if (FuncId <= 0 || static_cast<size_t>(FuncId) > InstrMap.Functions) {
    Report("Invalid function id provided: %d\n", FuncId);
    return XRayPatchingStatus::FAILED;
  }

  const size_t PageSize = flags()->xray_page_size_override > 0
                              ? flags()->xray_page_size_override
                              : GetPageSizeCached();
  if ((PageSize == 0) || ((PageSize & (PageSize - 1)) != 0)) {
    Report("Provided page size is not a power of two: %zu\n", PageSize);
    return XRayPatchingStatus::FAILED;
  }

  // Here we compute the minimum sled and maximum sled associated with a
  // particular function ID.
  auto SledRange = InstrMap.SledsIndex[FuncId - 1];
  auto *f = SledRange.Begin;
  auto *e = SledRange.End;
  auto MinSled = *f;
  auto MaxSled = *(SledRange.End - 1);
  while (f != e) {
    if (f->Address < MinSled.Address)
      MinSled = *f;
    if (f->Address > MaxSled.Address)
      MaxSled = *f;
    ++f;
  }

  void *PageAlignedAddr =
      reinterpret_cast<void *>(MinSled.Address & ~(PageSize - 1));
  size_t MProtectLen =
      (MaxSled.Address - reinterpret_cast<uptr>(PageAlignedAddr)) + cSledLength;
  MProtectHelper Protector(PageAlignedAddr, MProtectLen, PageSize);
  if (Protector.MakeWriteable() == -1) {
    Report("Failed mprotect: %d\n", errno);
    return XRayPatchingStatus::FAILED;
  }
  return patchFunction(FuncId, Enable);
}

} // namespace

} // namespace __xray

using namespace __xray;

// The following functions are declared `extern "C" {...}` in the header, hence
// they're defined in the global namespace.

int __xray_set_handler(void (*entry)(int32_t,
                                     XRayEntryType)) XRAY_NEVER_INSTRUMENT {
  if (atomic_load(&XRayInitialized, memory_order_acquire)) {

    atomic_store(&__xray::XRayPatchedFunction,
                 reinterpret_cast<uintptr_t>(entry), memory_order_release);
    return 1;
  }
  return 0;
}
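
// A minimal sketch of how an instrumented program might install a handler;
// the handler name and body below are hypothetical, not part of this runtime:
//
//   void MyHandler(int32_t FuncId, XRayEntryType Type) {
//     // record (FuncId, Type) somewhere cheap and thread-safe
//   }
//   ...
//   __xray_set_handler(MyHandler);
//   __xray_patch();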

int __xray_set_customevent_handler(void (*entry)(void *, size_t))
    XRAY_NEVER_INSTRUMENT {
  if (atomic_load(&XRayInitialized, memory_order_acquire)) {
    atomic_store(&__xray::XRayPatchedCustomEvent,
                 reinterpret_cast<uintptr_t>(entry), memory_order_release);
    return 1;
  }
  return 0;
}

int __xray_set_typedevent_handler(void (*entry)(
    uint16_t, const void *, size_t)) XRAY_NEVER_INSTRUMENT {
  if (atomic_load(&XRayInitialized, memory_order_acquire)) {
    atomic_store(&__xray::XRayPatchedTypedEvent,
                 reinterpret_cast<uintptr_t>(entry), memory_order_release);
    return 1;
  }
  return 0;
}

int __xray_remove_handler() XRAY_NEVER_INSTRUMENT {
  return __xray_set_handler(nullptr);
}

int __xray_remove_customevent_handler() XRAY_NEVER_INSTRUMENT {
  return __xray_set_customevent_handler(nullptr);
}

int __xray_remove_typedevent_handler() XRAY_NEVER_INSTRUMENT {
  return __xray_set_typedevent_handler(nullptr);
}

uint16_t __xray_register_event_type(
    const char *const event_type) XRAY_NEVER_INSTRUMENT {
  TypeDescriptorMapType::Handle h(&TypeDescriptorAddressMap, (uptr)event_type);
  if (h.created()) {
    h->type_id = atomic_fetch_add(&TypeEventDescriptorCounter, 1,
                                  memory_order_acq_rel);
    h->description_string_length = strnlen(event_type, 1024);
  }
  return h->type_id;
}
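
// The map above is keyed on the address of the descriptor string, so
// registering the same string object again returns the same id. A
// hypothetical caller:
//
//   static constexpr char kEventDesc[] = "my_subsystem.latency";
//   uint16_t TypeId = __xray_register_event_type(kEventDesc);
//   // ... pass TypeId to the typed-event instrumentation point.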

XRayPatchingStatus __xray_patch() XRAY_NEVER_INSTRUMENT {
  return controlPatching(true);
}

XRayPatchingStatus __xray_unpatch() XRAY_NEVER_INSTRUMENT {
  return controlPatching(false);
}

XRayPatchingStatus __xray_patch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
  return mprotectAndPatchFunction(FuncId, true);
}

XRayPatchingStatus
__xray_unpatch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
  return mprotectAndPatchFunction(FuncId, false);
}

int __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType, uint64_t)) {
  if (!atomic_load(&XRayInitialized, memory_order_acquire))
    return 0;

  // A relaxed write might not be visible even if the current thread gets
  // scheduled on a different CPU/NUMA node.  We need to wait for everyone to
  // have this handler installed for consistency of collected data across CPUs.
  atomic_store(&XRayArgLogger, reinterpret_cast<uint64_t>(entry),
               memory_order_release);
  return 1;
}

int __xray_remove_handler_arg1() { return __xray_set_handler_arg1(nullptr); }

uintptr_t __xray_function_address(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
  SpinMutexLock Guard(&XRayInstrMapMutex);
  if (FuncId <= 0 || static_cast<size_t>(FuncId) > XRayInstrMap.Functions)
    return 0;
  return XRayInstrMap.SledsIndex[FuncId - 1].Begin->Function
// On PPC, function entries are always aligned to 16 bytes. The beginning of a
// sled might be a local entry, which is always +8 based on the global entry.
// Always return the global entry.
#ifdef __PPC__
         & ~0xf
#endif
      ;
}

size_t __xray_max_function_id() XRAY_NEVER_INSTRUMENT {
  SpinMutexLock Guard(&XRayInstrMapMutex);
  return XRayInstrMap.Functions;
}
    482