Home | History | Annotate | Line # | Download | only in xray
      1  1.1  kamil //===-- xray_profiling.cc ---------------------------------------*- C++ -*-===//
      2  1.1  kamil //
      3  1.1  kamil //                     The LLVM Compiler Infrastructure
      4  1.1  kamil //
      5  1.1  kamil // This file is distributed under the University of Illinois Open Source
      6  1.1  kamil // License. See LICENSE.TXT for details.
      7  1.1  kamil //
      8  1.1  kamil //===----------------------------------------------------------------------===//
      9  1.1  kamil //
     10  1.1  kamil // This file is a part of XRay, a dynamic runtime instrumentation system.
     11  1.1  kamil //
     12  1.1  kamil // This is the implementation of a profiling handler.
     13  1.1  kamil //
     14  1.1  kamil //===----------------------------------------------------------------------===//
     15  1.1  kamil #include <memory>
     16  1.1  kamil #include <time.h>
     17  1.1  kamil 
     18  1.1  kamil #include "sanitizer_common/sanitizer_atomic.h"
     19  1.1  kamil #include "sanitizer_common/sanitizer_flags.h"
     20  1.1  kamil #include "xray/xray_interface.h"
     21  1.1  kamil #include "xray/xray_log_interface.h"
     22  1.1  kamil #include "xray_buffer_queue.h"
     23  1.1  kamil #include "xray_flags.h"
     24  1.1  kamil #include "xray_profile_collector.h"
     25  1.1  kamil #include "xray_profiling_flags.h"
     26  1.1  kamil #include "xray_recursion_guard.h"
     27  1.1  kamil #include "xray_tsc.h"
     28  1.1  kamil #include "xray_utils.h"
     29  1.1  kamil #include <pthread.h>
     30  1.1  kamil 
     31  1.1  kamil namespace __xray {
     32  1.1  kamil 
     33  1.1  kamil namespace {
     34  1.1  kamil 
// Global flush state; XRAY_LOG_FLUSHING doubles as a lock so that only one
// flush proceeds at a time (see profilingFlush()).
static atomic_sint32_t ProfilerLogFlushStatus = {
    XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING};

// Lifecycle state of the profiling implementation, advanced via CAS in
// profilingLoggingInit() and profilingFinalize().
static atomic_sint32_t ProfilerLogStatus = {
    XRayLogInitStatus::XRAY_LOG_UNINITIALIZED};

// Serialises (re)parsing of the profiler flags.
static SpinMutex ProfilerOptionsMutex;

// Per-thread profiling state. Each field is a tagged atomic pointer:
//   0 = not initialised, 1 = initialisation in progress (sentinel),
//   anything else = address of the corresponding thread-local storage below.
struct ProfilingData {
  atomic_uintptr_t Allocators; // FunctionCallTrie::Allocators *
  atomic_uintptr_t FCT;        // FunctionCallTrie *
};

// TSD key whose destructor posts a thread's data at thread exit; the value
// stored under it is &TLD (see getThreadLocalData() and the key creation in
// profilingLoggingInit()).
static pthread_key_t ProfilingKey;

// We use a global buffer queue, which gets initialized once at initialisation
// time, and gets reset when profiling is "done". BQ stays null until the
// storage below has been constructed in profilingLoggingInit().
static std::aligned_storage<sizeof(BufferQueue), alignof(BufferQueue)>::type
    BufferQueueStorage;
static BufferQueue *BQ = nullptr;

// Buffers this thread obtained from BQ, plus raw storage for the thread's
// allocators and call trie; both storages are populated lazily via
// placement-new in getThreadLocalData().
thread_local FunctionCallTrie::Allocators::Buffers ThreadBuffers;
thread_local std::aligned_storage<sizeof(FunctionCallTrie::Allocators),
                                  alignof(FunctionCallTrie::Allocators)>::type
    AllocatorsStorage;
thread_local std::aligned_storage<sizeof(FunctionCallTrie),
                                  alignof(FunctionCallTrie)>::type
    FunctionCallTrieStorage;
thread_local ProfilingData TLD{{0}, {0}};

// Guards the event handlers against re-entry on the same thread (e.g. a
// signal handler firing while a handler is already running).
thread_local atomic_uint8_t ReentranceGuard{0};

// We use a separate guard for ensuring that for this thread, if we're already
// cleaning up, that any signal handlers don't attempt to cleanup nor
// initialise.
thread_local atomic_uint8_t TLDInitGuard{0};

// We also use a separate latch to signal that the thread is exiting, and
// non-essential work should be ignored (things like recording events, etc.).
thread_local atomic_uint8_t ThreadExitingLatch{0};
     74  1.1  kamil 
// Returns the calling thread's ProfilingData, lazily setting up its
// allocators and FunctionCallTrie on first use. Returns nullptr when the
// thread is exiting, when re-entered during setup/teardown (TLDInitGuard),
// when initialisation is still marked in-progress (sentinel value 1), or
// when buffers cannot be acquired from the global queue.
static ProfilingData *getThreadLocalData() XRAY_NEVER_INSTRUMENT {
  // Once per thread, register &TLD under the pthread key so the key's
  // destructor fires at thread exit (key created in profilingLoggingInit()).
  thread_local auto ThreadOnce = []() XRAY_NEVER_INSTRUMENT {
    pthread_setspecific(ProfilingKey, &TLD);
    return false;
  }();
  (void)ThreadOnce;

  RecursionGuard TLDInit(TLDInitGuard);
  if (!TLDInit)
    return nullptr;

  if (atomic_load_relaxed(&ThreadExitingLatch))
    return nullptr;

  // Claim initialisation of the allocators by swapping 0 -> 1 (the
  // "in progress" sentinel). On success we either publish the real pointer
  // below, or the scope-exit guards roll everything back.
  uptr Allocators = 0;
  if (atomic_compare_exchange_strong(&TLD.Allocators, &Allocators, 1,
                                     memory_order_acq_rel)) {
    bool Success = false;
    auto AllocatorsUndo = at_scope_exit([&]() XRAY_NEVER_INSTRUMENT {
      if (!Success)
        atomic_store(&TLD.Allocators, 0, memory_order_release);
    });

    // Acquire a set of buffers for this thread.
    if (BQ == nullptr)
      return nullptr;

    if (BQ->getBuffer(ThreadBuffers.NodeBuffer) != BufferQueue::ErrorCode::Ok)
      return nullptr;
    auto NodeBufferUndo = at_scope_exit([&]() XRAY_NEVER_INSTRUMENT {
      if (!Success)
        BQ->releaseBuffer(ThreadBuffers.NodeBuffer);
    });

    if (BQ->getBuffer(ThreadBuffers.RootsBuffer) != BufferQueue::ErrorCode::Ok)
      return nullptr;
    auto RootsBufferUndo = at_scope_exit([&]() XRAY_NEVER_INSTRUMENT {
      if (!Success)
        BQ->releaseBuffer(ThreadBuffers.RootsBuffer);
    });

    if (BQ->getBuffer(ThreadBuffers.ShadowStackBuffer) !=
        BufferQueue::ErrorCode::Ok)
      return nullptr;
    auto ShadowStackBufferUndo = at_scope_exit([&]() XRAY_NEVER_INSTRUMENT {
      if (!Success)
        BQ->releaseBuffer(ThreadBuffers.ShadowStackBuffer);
    });

    if (BQ->getBuffer(ThreadBuffers.NodeIdPairBuffer) !=
        BufferQueue::ErrorCode::Ok)
      return nullptr;

    // All buffers acquired: construct the allocators in the thread-local
    // storage and publish their address (disarming the undo guards).
    Success = true;
    new (&AllocatorsStorage) FunctionCallTrie::Allocators(
        FunctionCallTrie::InitAllocatorsFromBuffers(ThreadBuffers));
    Allocators = reinterpret_cast<uptr>(
        reinterpret_cast<FunctionCallTrie::Allocators *>(&AllocatorsStorage));
    atomic_store(&TLD.Allocators, Allocators, memory_order_release);
  }

  // Observed the in-progress sentinel: setup has not completed yet.
  if (Allocators == 1)
    return nullptr;

  // Same 0 -> 1 claim protocol for the FunctionCallTrie itself.
  uptr FCT = 0;
  if (atomic_compare_exchange_strong(&TLD.FCT, &FCT, 1, memory_order_acq_rel)) {
    new (&FunctionCallTrieStorage)
        FunctionCallTrie(*reinterpret_cast<FunctionCallTrie::Allocators *>(
            atomic_load_relaxed(&TLD.Allocators)));
    FCT = reinterpret_cast<uptr>(
        reinterpret_cast<FunctionCallTrie *>(&FunctionCallTrieStorage));
    atomic_store(&TLD.FCT, FCT, memory_order_release);
  }

  if (FCT == 1)
    return nullptr;

  return &TLD;
}
    154  1.1  kamil 
    155  1.1  kamil static void cleanupTLD() XRAY_NEVER_INSTRUMENT {
    156  1.1  kamil   auto FCT = atomic_exchange(&TLD.FCT, 0, memory_order_acq_rel);
    157  1.1  kamil   if (FCT == reinterpret_cast<uptr>(reinterpret_cast<FunctionCallTrie *>(
    158  1.1  kamil                  &FunctionCallTrieStorage)))
    159  1.1  kamil     reinterpret_cast<FunctionCallTrie *>(FCT)->~FunctionCallTrie();
    160  1.1  kamil 
    161  1.1  kamil   auto Allocators = atomic_exchange(&TLD.Allocators, 0, memory_order_acq_rel);
    162  1.1  kamil   if (Allocators ==
    163  1.1  kamil       reinterpret_cast<uptr>(
    164  1.1  kamil           reinterpret_cast<FunctionCallTrie::Allocators *>(&AllocatorsStorage)))
    165  1.1  kamil     reinterpret_cast<FunctionCallTrie::Allocators *>(Allocators)->~Allocators();
    166  1.1  kamil }
    167  1.1  kamil 
// Moves the given thread's trie, allocators, and buffers into the global
// profile collector. No-ops when TLD setup/teardown is already in progress
// on this thread, or when the tagged pointers do not refer to our storage
// (nothing was set up, or the data was already posted).
static void postCurrentThreadFCT(ProfilingData &T) XRAY_NEVER_INSTRUMENT {
  // Don't race with getThreadLocalData()/cleanup on this same thread (e.g.
  // if we were invoked from a signal handler mid-initialisation).
  RecursionGuard TLDInit(TLDInitGuard);
  if (!TLDInit)
    return;

  // Claim the trie by swapping in 0, so it cannot be posted twice.
  uptr P = atomic_exchange(&T.FCT, 0, memory_order_acq_rel);
  if (P != reinterpret_cast<uptr>(
               reinterpret_cast<FunctionCallTrie *>(&FunctionCallTrieStorage)))
    return;

  auto FCT = reinterpret_cast<FunctionCallTrie *>(P);
  DCHECK_NE(FCT, nullptr);

  // Claim the allocators the same way.
  uptr A = atomic_exchange(&T.Allocators, 0, memory_order_acq_rel);
  if (A !=
      reinterpret_cast<uptr>(
          reinterpret_cast<FunctionCallTrie::Allocators *>(&AllocatorsStorage)))
    return;

  auto Allocators = reinterpret_cast<FunctionCallTrie::Allocators *>(A);
  DCHECK_NE(Allocators, nullptr);

  // Always move the data into the profile collector.
  profileCollectorService::post(BQ, std::move(*FCT), std::move(*Allocators),
                                std::move(ThreadBuffers), GetTid());

  // Re-initialize the ThreadBuffers object to a known "default" state.
  ThreadBuffers = FunctionCallTrie::Allocators::Buffers{};
}
    197  1.1  kamil 
    198  1.1  kamil } // namespace
    199  1.1  kamil 
// Returns the compiler-provided default options string for the profiler, or
// an empty string when XRAY_PROFILER_DEFAULT_OPTIONS was not defined at
// build time.
const char *profilingCompilerDefinedFlags() XRAY_NEVER_INSTRUMENT {
#ifdef XRAY_PROFILER_DEFAULT_OPTIONS
  return SANITIZER_STRINGIFY(XRAY_PROFILER_DEFAULT_OPTIONS);
#else
  return "";
#endif
}
    207  1.1  kamil 
    208  1.1  kamil XRayLogFlushStatus profilingFlush() XRAY_NEVER_INSTRUMENT {
    209  1.1  kamil   if (atomic_load(&ProfilerLogStatus, memory_order_acquire) !=
    210  1.1  kamil       XRayLogInitStatus::XRAY_LOG_FINALIZED) {
    211  1.1  kamil     if (Verbosity())
    212  1.1  kamil       Report("Not flushing profiles, profiling not been finalized.\n");
    213  1.1  kamil     return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
    214  1.1  kamil   }
    215  1.1  kamil 
    216  1.1  kamil   RecursionGuard SignalGuard(ReentranceGuard);
    217  1.1  kamil   if (!SignalGuard) {
    218  1.1  kamil     if (Verbosity())
    219  1.1  kamil       Report("Cannot finalize properly inside a signal handler!\n");
    220  1.1  kamil     atomic_store(&ProfilerLogFlushStatus,
    221  1.1  kamil                  XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING,
    222  1.1  kamil                  memory_order_release);
    223  1.1  kamil     return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
    224  1.1  kamil   }
    225  1.1  kamil 
    226  1.1  kamil   s32 Previous = atomic_exchange(&ProfilerLogFlushStatus,
    227  1.1  kamil                                  XRayLogFlushStatus::XRAY_LOG_FLUSHING,
    228  1.1  kamil                                  memory_order_acq_rel);
    229  1.1  kamil   if (Previous == XRayLogFlushStatus::XRAY_LOG_FLUSHING) {
    230  1.1  kamil     if (Verbosity())
    231  1.1  kamil       Report("Not flushing profiles, implementation still flushing.\n");
    232  1.1  kamil     return XRayLogFlushStatus::XRAY_LOG_FLUSHING;
    233  1.1  kamil   }
    234  1.1  kamil 
    235  1.1  kamil   // At this point, we'll create the file that will contain the profile, but
    236  1.1  kamil   // only if the options say so.
    237  1.1  kamil   if (!profilingFlags()->no_flush) {
    238  1.1  kamil     // First check whether we have data in the profile collector service
    239  1.1  kamil     // before we try and write anything down.
    240  1.1  kamil     XRayBuffer B = profileCollectorService::nextBuffer({nullptr, 0});
    241  1.1  kamil     if (B.Data == nullptr) {
    242  1.1  kamil       if (Verbosity())
    243  1.1  kamil         Report("profiling: No data to flush.\n");
    244  1.1  kamil     } else {
    245  1.1  kamil       LogWriter *LW = LogWriter::Open();
    246  1.1  kamil       if (LW == nullptr) {
    247  1.1  kamil         if (Verbosity())
    248  1.1  kamil           Report("profiling: Failed to flush to file, dropping data.\n");
    249  1.1  kamil       } else {
    250  1.1  kamil         // Now for each of the buffers, write out the profile data as we would
    251  1.1  kamil         // see it in memory, verbatim.
    252  1.1  kamil         while (B.Data != nullptr && B.Size != 0) {
    253  1.1  kamil           LW->WriteAll(reinterpret_cast<const char *>(B.Data),
    254  1.1  kamil                        reinterpret_cast<const char *>(B.Data) + B.Size);
    255  1.1  kamil           B = profileCollectorService::nextBuffer(B);
    256  1.1  kamil         }
    257  1.1  kamil       }
    258  1.1  kamil       LogWriter::Close(LW);
    259  1.1  kamil     }
    260  1.1  kamil   }
    261  1.1  kamil 
    262  1.1  kamil   profileCollectorService::reset();
    263  1.1  kamil 
    264  1.1  kamil   atomic_store(&ProfilerLogFlushStatus, XRayLogFlushStatus::XRAY_LOG_FLUSHED,
    265  1.1  kamil                memory_order_release);
    266  1.1  kamil   atomic_store(&ProfilerLogStatus, XRayLogInitStatus::XRAY_LOG_UNINITIALIZED,
    267  1.1  kamil                memory_order_release);
    268  1.1  kamil 
    269  1.1  kamil   return XRayLogFlushStatus::XRAY_LOG_FLUSHED;
    270  1.1  kamil }
    271  1.1  kamil 
    272  1.1  kamil void profilingHandleArg0(int32_t FuncId,
    273  1.1  kamil                          XRayEntryType Entry) XRAY_NEVER_INSTRUMENT {
    274  1.1  kamil   unsigned char CPU;
    275  1.1  kamil   auto TSC = readTSC(CPU);
    276  1.1  kamil   RecursionGuard G(ReentranceGuard);
    277  1.1  kamil   if (!G)
    278  1.1  kamil     return;
    279  1.1  kamil 
    280  1.1  kamil   auto Status = atomic_load(&ProfilerLogStatus, memory_order_acquire);
    281  1.1  kamil   if (UNLIKELY(Status == XRayLogInitStatus::XRAY_LOG_UNINITIALIZED ||
    282  1.1  kamil                Status == XRayLogInitStatus::XRAY_LOG_INITIALIZING))
    283  1.1  kamil     return;
    284  1.1  kamil 
    285  1.1  kamil   if (UNLIKELY(Status == XRayLogInitStatus::XRAY_LOG_FINALIZED ||
    286  1.1  kamil                Status == XRayLogInitStatus::XRAY_LOG_FINALIZING)) {
    287  1.1  kamil     postCurrentThreadFCT(TLD);
    288  1.1  kamil     return;
    289  1.1  kamil   }
    290  1.1  kamil 
    291  1.1  kamil   auto T = getThreadLocalData();
    292  1.1  kamil   if (T == nullptr)
    293  1.1  kamil     return;
    294  1.1  kamil 
    295  1.1  kamil   auto FCT = reinterpret_cast<FunctionCallTrie *>(atomic_load_relaxed(&T->FCT));
    296  1.1  kamil   switch (Entry) {
    297  1.1  kamil   case XRayEntryType::ENTRY:
    298  1.1  kamil   case XRayEntryType::LOG_ARGS_ENTRY:
    299  1.1  kamil     FCT->enterFunction(FuncId, TSC, CPU);
    300  1.1  kamil     break;
    301  1.1  kamil   case XRayEntryType::EXIT:
    302  1.1  kamil   case XRayEntryType::TAIL:
    303  1.1  kamil     FCT->exitFunction(FuncId, TSC, CPU);
    304  1.1  kamil     break;
    305  1.1  kamil   default:
    306  1.1  kamil     // FIXME: Handle bugs.
    307  1.1  kamil     break;
    308  1.1  kamil   }
    309  1.1  kamil }
    310  1.1  kamil 
    311  1.1  kamil void profilingHandleArg1(int32_t FuncId, XRayEntryType Entry,
    312  1.1  kamil                          uint64_t) XRAY_NEVER_INSTRUMENT {
    313  1.1  kamil   return profilingHandleArg0(FuncId, Entry);
    314  1.1  kamil }
    315  1.1  kamil 
// Transitions the profiler from INITIALIZED to FINALIZING, retires the
// current generation of buffers, posts the calling thread's data, serializes
// the collected profiles, and marks the log FINALIZED.
// Returns XRAY_LOG_FINALIZED on success; otherwise the observed status.
XRayLogInitStatus profilingFinalize() XRAY_NEVER_INSTRUMENT {
  s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_INITIALIZED;
  if (!atomic_compare_exchange_strong(&ProfilerLogStatus, &CurrentStatus,
                                      XRayLogInitStatus::XRAY_LOG_FINALIZING,
                                      memory_order_release)) {
    if (Verbosity())
      Report("Cannot finalize profile, the profiling is not initialized.\n");
    // On CAS failure, CurrentStatus holds the state actually observed.
    return static_cast<XRayLogInitStatus>(CurrentStatus);
  }

  // Mark then finalize the current generation of buffers. This allows us to let
  // the threads currently holding onto new buffers still use them, but let the
  // last reference do the memory cleanup.
  DCHECK_NE(BQ, nullptr);
  BQ->finalize();

  // Wait a grace period to allow threads to see that we're finalizing.
  SleepForMillis(profilingFlags()->grace_period_ms);

  // If we for some reason are entering this function from an instrumented
  // handler, we bail out.
  RecursionGuard G(ReentranceGuard);
  if (!G)
    return static_cast<XRayLogInitStatus>(CurrentStatus);

  // Post the current thread's data if we have any.
  postCurrentThreadFCT(TLD);

  // Then we force serialize the log data.
  profileCollectorService::serialize();

  atomic_store(&ProfilerLogStatus, XRayLogInitStatus::XRAY_LOG_FINALIZED,
               memory_order_release);
  return XRayLogInitStatus::XRAY_LOG_FINALIZED;
}
    351  1.1  kamil 
// Initializes profiling mode: parses flags (compiler defaults, then the
// XRAY_PROFILING_OPTIONS environment variable, then the caller-provided
// Options string — later sources override earlier ones), resets the
// collector, (re)initialises the global buffer queue, installs the
// thread-exit and atexit hooks once per process, and registers the XRay
// event handlers. The first two size_t parameters are part of the log API
// but unused here.
// Returns XRAY_LOG_INITIALIZED on success, XRAY_LOG_UNINITIALIZED (or the
// observed status) on failure.
XRayLogInitStatus
profilingLoggingInit(size_t, size_t, void *Options,
                     size_t OptionsSize) XRAY_NEVER_INSTRUMENT {
  // Bail out if re-entered (e.g. from a signal handler).
  RecursionGuard G(ReentranceGuard);
  if (!G)
    return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;

  // Only one initialisation may proceed: UNINITIALIZED -> INITIALIZING.
  s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
  if (!atomic_compare_exchange_strong(&ProfilerLogStatus, &CurrentStatus,
                                      XRayLogInitStatus::XRAY_LOG_INITIALIZING,
                                      memory_order_acq_rel)) {
    if (Verbosity())
      Report("Cannot initialize already initialised profiling "
             "implementation.\n");
    return static_cast<XRayLogInitStatus>(CurrentStatus);
  }

  {
    SpinMutexLock Lock(&ProfilerOptionsMutex);
    FlagParser ConfigParser;
    ProfilerFlags Flags;
    Flags.setDefaults();
    registerProfilerFlags(&ConfigParser, &Flags);
    ConfigParser.ParseString(profilingCompilerDefinedFlags());
    const char *Env = GetEnv("XRAY_PROFILING_OPTIONS");
    if (Env == nullptr)
      Env = "";
    ConfigParser.ParseString(Env);

    // Then parse the configuration string provided.
    // NOTE(review): OptionsSize is ignored here; Options is assumed to be
    // NUL-terminated — confirm against the log API contract.
    ConfigParser.ParseString(static_cast<const char *>(Options));
    if (Verbosity())
      ReportUnrecognizedFlags();
    *profilingFlags() = Flags;
  }

  // We need to reset the profile data collection implementation now.
  profileCollectorService::reset();

  // Then also reset the buffer queue implementation.
  if (BQ == nullptr) {
    // First-ever initialisation: construct the queue in its static storage.
    bool Success = false;
    new (&BufferQueueStorage)
        BufferQueue(profilingFlags()->per_thread_allocator_max,
                    profilingFlags()->buffers_max, Success);
    if (!Success) {
      if (Verbosity())
        Report("Failed to initialize preallocated memory buffers!");
      atomic_store(&ProfilerLogStatus,
                   XRayLogInitStatus::XRAY_LOG_UNINITIALIZED,
                   memory_order_release);
      return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
    }

    // If we've succeeded, set the global pointer to the initialised storage.
    BQ = reinterpret_cast<BufferQueue *>(&BufferQueueStorage);
  } else {
    // Re-initialisation: retire the previous generation of buffers and
    // start a fresh one with the (possibly updated) flag values.
    BQ->finalize();
    auto InitStatus = BQ->init(profilingFlags()->per_thread_allocator_max,
                               profilingFlags()->buffers_max);

    if (InitStatus != BufferQueue::ErrorCode::Ok) {
      if (Verbosity())
        Report("Failed to initialize preallocated memory buffers; error: %s",
               BufferQueue::getErrorString(InitStatus));
      atomic_store(&ProfilerLogStatus,
                   XRayLogInitStatus::XRAY_LOG_UNINITIALIZED,
                   memory_order_release);
      return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
    }

    DCHECK(!BQ->finalizing());
  }

  // We need to set up the exit handlers.
  static pthread_once_t Once = PTHREAD_ONCE_INIT;
  pthread_once(
      &Once, +[] {
        // Thread-exit hook: the TSD destructor for ProfilingKey receives the
        // &TLD stored by getThreadLocalData() and posts that thread's data.
        pthread_key_create(
            &ProfilingKey, +[](void *P) XRAY_NEVER_INSTRUMENT {
              // Latch the thread as exiting; if it already was, do nothing.
              if (atomic_exchange(&ThreadExitingLatch, 1, memory_order_acq_rel))
                return;

              if (P == nullptr)
                return;

              auto T = reinterpret_cast<ProfilingData *>(P);
              // Nothing was ever initialised on this thread.
              if (atomic_load_relaxed(&T->Allocators) == 0)
                return;

              {
                // If we're somehow executing this while inside a
                // non-reentrant-friendly context, we skip attempting to post
                // the current thread's data.
                RecursionGuard G(ReentranceGuard);
                if (!G)
                  return;

                postCurrentThreadFCT(*T);
              }
            });

        // We also need to set up an exit handler, so that we can get the
        // profile information at exit time. We use the C API to do this, to not
        // rely on C++ ABI functions for registering exit handlers.
        Atexit(+[]() XRAY_NEVER_INSTRUMENT {
          if (atomic_exchange(&ThreadExitingLatch, 1, memory_order_acq_rel))
            return;

          // Ensure the thread-local state is torn down however we leave.
          auto Cleanup =
              at_scope_exit([]() XRAY_NEVER_INSTRUMENT { cleanupTLD(); });

          // Finalize and flush.
          if (profilingFinalize() != XRAY_LOG_FINALIZED ||
              profilingFlush() != XRAY_LOG_FLUSHED)
            return;

          if (Verbosity())
            Report("XRay Profile flushed at exit.");
        });
      });

  __xray_log_set_buffer_iterator(profileCollectorService::nextBuffer);
  __xray_set_handler(profilingHandleArg0);
  __xray_set_handler_arg1(profilingHandleArg1);

  atomic_store(&ProfilerLogStatus, XRayLogInitStatus::XRAY_LOG_INITIALIZED,
               memory_order_release);
  if (Verbosity())
    Report("XRay Profiling init successful.\n");

  return XRayLogInitStatus::XRAY_LOG_INITIALIZED;
}
    485  1.1  kamil 
    486  1.1  kamil bool profilingDynamicInitializer() XRAY_NEVER_INSTRUMENT {
    487  1.1  kamil   // Set up the flag defaults from the static defaults and the
    488  1.1  kamil   // compiler-provided defaults.
    489  1.1  kamil   {
    490  1.1  kamil     SpinMutexLock Lock(&ProfilerOptionsMutex);
    491  1.1  kamil     auto *F = profilingFlags();
    492  1.1  kamil     F->setDefaults();
    493  1.1  kamil     FlagParser ProfilingParser;
    494  1.1  kamil     registerProfilerFlags(&ProfilingParser, F);
    495  1.1  kamil     ProfilingParser.ParseString(profilingCompilerDefinedFlags());
    496  1.1  kamil   }
    497  1.1  kamil 
    498  1.1  kamil   XRayLogImpl Impl{
    499  1.1  kamil       profilingLoggingInit,
    500  1.1  kamil       profilingFinalize,
    501  1.1  kamil       profilingHandleArg0,
    502  1.1  kamil       profilingFlush,
    503  1.1  kamil   };
    504  1.1  kamil   auto RegistrationResult = __xray_log_register_mode("xray-profiling", Impl);
    505  1.1  kamil   if (RegistrationResult != XRayLogRegisterStatus::XRAY_REGISTRATION_OK) {
    506  1.1  kamil     if (Verbosity())
    507  1.1  kamil       Report("Cannot register XRay Profiling mode to 'xray-profiling'; error = "
    508  1.1  kamil              "%d\n",
    509  1.1  kamil              RegistrationResult);
    510  1.1  kamil     return false;
    511  1.1  kamil   }
    512  1.1  kamil 
    513  1.1  kamil   if (!internal_strcmp(flags()->xray_mode, "xray-profiling"))
    514  1.1  kamil     __xray_log_select_mode("xray_profiling");
    515  1.1  kamil   return true;
    516  1.1  kamil }
    517  1.1  kamil 
    518  1.1  kamil } // namespace __xray
    519  1.1  kamil 
    520  1.1  kamil static auto UNUSED Unused = __xray::profilingDynamicInitializer();
    521