//===-- sanitizer_stoptheworld_linux_libcdep.cc ---------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// See sanitizer_stoptheworld.h for details.
// This implementation was inspired by Markus Gutschke's linuxthreads.cc.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"

#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) || \
                        defined(__aarch64__) || defined(__powerpc64__) || \
                        defined(__s390__) || defined(__i386__) || \
                        defined(__arm__))

#include "sanitizer_stoptheworld.h"

#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_atomic.h"

#include <errno.h>
#include <sched.h> // for CLONE_* definitions
#include <stddef.h>
#include <sys/prctl.h> // for PR_* definitions
#include <sys/ptrace.h> // for PTRACE_* definitions
#include <sys/types.h> // for pid_t
#include <sys/uio.h> // for iovec
#include <elf.h> // for NT_PRSTATUS
#if defined(__aarch64__) && !SANITIZER_ANDROID
// GLIBC 2.20+ sys/user does not include asm/ptrace.h
# include <asm/ptrace.h>
#endif
#include <sys/user.h>  // for user_regs_struct
#if SANITIZER_ANDROID && SANITIZER_MIPS
# include <asm/reg.h>  // for mips SP register in sys/user.h
#endif
#include <sys/wait.h> // for signal-related stuff

#ifdef sa_handler
# undef sa_handler
#endif

#ifdef sa_sigaction
# undef sa_sigaction
#endif

#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include "sanitizer_linux.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"

// Sufficiently old kernel headers don't provide this value, but we can still
// call prctl with it. If the runtime kernel is new enough, the prctl call will
// have the desired effect; if the kernel is too old, the call will error and we
// can ignore said error.
#ifndef PR_SET_PTRACER
#define PR_SET_PTRACER 0x59616d61
#endif
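// (The lone call site below simply discards the result, i.e. it does
//   internal_prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
// and ignores the return value, so an EINVAL from an old kernel is harmless.)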

// This module works by spawning a Linux task which then attaches to every
// thread in the caller process with ptrace. This suspends the threads, and
// PTRACE_GETREGS can then be used to obtain their register state. The callback
// supplied to StopTheWorld() is run in the tracer task while the threads are
// suspended.
// The tracer task must be placed in a different thread group for ptrace to
// work, so it cannot be spawned as a pthread. Instead, we use the low-level
// clone() interface (we want to share the address space with the caller
// process, so we prefer clone() over fork()).
//
// We don't use any libc functions, relying instead on direct syscalls. There
// are two reasons for this:
// 1. calling a library function while threads are suspended could cause a
// deadlock, if one of the threads happens to be holding a libc lock;
// 2. it's generally not safe to call libc functions from the tracer task,
// because clone() does not set up thread-local storage for it. Any
// thread-local variables used by libc would be shared between the tracer task
// and the thread which spawned it.

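// As an illustration only, here is a minimal sketch of that pattern using raw
// libc calls (TracerSketch/StopOneThread are hypothetical names; the real
// code below uses the internal_* syscall wrappers and attaches to every
// thread, not just one):
//
//   static int TracerSketch(void *arg) {
//     pid_t tid = *(pid_t *)arg;
//     ptrace(PTRACE_ATTACH, tid, nullptr, nullptr);  // suspend the thread
//     waitpid(tid, nullptr, __WALL);                 // wait for it to stop
//     // ... inspect the suspended thread, e.g. with PTRACE_GETREGS ...
//     ptrace(PTRACE_DETACH, tid, nullptr, nullptr);  // resume the thread
//     return 0;
//   }
//   // Suspend, inspect and resume one thread of the current process.
//   static void StopOneThread(pid_t tid) {
//     alignas(16) static char stack[65536];
//     pid_t tracer = clone(TracerSketch, stack + sizeof(stack),
//                          CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,
//                          &tid);
//     waitpid(tracer, nullptr, __WALL);
//   }
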
namespace __sanitizer {

// Linux-specific list of suspended threads; holds the IDs of the threads the
// tracer has successfully attached to.
class SuspendedThreadsListLinux : public SuspendedThreadsList {
 public:
  SuspendedThreadsListLinux() { thread_ids_.reserve(1024); }

  tid_t GetThreadID(uptr index) const;
  uptr ThreadCount() const;
  bool ContainsTid(tid_t thread_id) const;
  void Append(tid_t tid);

  PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
                                          uptr *sp) const;
  uptr RegisterCount() const;

 private:
  InternalMmapVector<tid_t> thread_ids_;
};

// Structure for passing arguments into the tracer thread.
struct TracerThreadArgument {
  StopTheWorldCallback callback;
  void *callback_argument;
  // The tracer thread waits on this mutex while the parent finishes its
  // preparations.
  BlockingMutex mutex;
  // Tracer thread signals its completion by setting done.
  atomic_uintptr_t done;
  uptr parent_pid;
};
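
// The parent/tracer handshake built on this structure looks like this
// (a schematic sketch of the code below, not code from this file):
//
//   // parent                                // tracer
//   arg.mutex.Lock();
//   internal_clone(TracerThread, ...);
//                                            arg.mutex.Lock();    // blocks
//   internal_prctl(PR_SET_PTRACER, ...);
//   arg.mutex.Unlock();  // releases tracer
//                                            arg.mutex.Unlock();
//                                            // suspend, callback, resume
//                                            atomic_store(&arg.done, 1, ...);
//   while (!atomic_load(&arg.done, ...))
//     sched_yield();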

// This class handles thread suspending/unsuspending in the tracer thread.
class ThreadSuspender {
 public:
  explicit ThreadSuspender(pid_t pid, TracerThreadArgument *arg)
    : arg(arg)
    , pid_(pid) {
      CHECK_GE(pid, 0);
    }
  bool SuspendAllThreads();
  void ResumeAllThreads();
  void KillAllThreads();
  SuspendedThreadsListLinux &suspended_threads_list() {
    return suspended_threads_list_;
  }
  TracerThreadArgument *arg;
 private:
  SuspendedThreadsListLinux suspended_threads_list_;
  pid_t pid_;
  bool SuspendThread(tid_t thread_id);
};

bool ThreadSuspender::SuspendThread(tid_t tid) {
  // Are we already attached to this thread?
  // Currently this check takes linear time, however the number of threads is
  // usually small.
  if (suspended_threads_list_.ContainsTid(tid)) return false;
  int pterrno;
  if (internal_iserror(internal_ptrace(PTRACE_ATTACH, tid, nullptr, nullptr),
                       &pterrno)) {
    // Either the thread is dead, or something prevented us from attaching.
    // Log this event and move on.
    VReport(1, "Could not attach to thread %zu (errno %d).\n", (uptr)tid,
            pterrno);
    return false;
  } else {
    VReport(2, "Attached to thread %zu.\n", (uptr)tid);
    // The thread is not guaranteed to stop before ptrace returns, so we must
    // wait on it. Note: if the thread receives a signal concurrently,
    // we can get notification about the signal before notification about stop.
    // In that case we need to forward the signal to the thread, otherwise
    // the signal will be missed (as we do PTRACE_DETACH with arg=0) and
    // any logic relying on signals will break. After forwarding we need to
    // continue to wait for the stop, because the thread is not stopped yet.
    // We ignore the delivery of SIGSTOP itself, because we want to make
    // stop-the-world as invisible as possible.
    for (;;) {
      int status;
      uptr waitpid_status;
      HANDLE_EINTR(waitpid_status, internal_waitpid(tid, &status, __WALL));
      int wperrno;
      if (internal_iserror(waitpid_status, &wperrno)) {
        // Got an ECHILD error. I don't think this situation is possible, but
        // it doesn't hurt to report it.
        VReport(1, "Waiting on thread %zu failed, detaching (errno %d).\n",
                (uptr)tid, wperrno);
        internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr);
        return false;
      }
      if (WIFSTOPPED(status) && WSTOPSIG(status) != SIGSTOP) {
        // Forward the signal and keep waiting for the SIGSTOP.
        internal_ptrace(PTRACE_CONT, tid, nullptr,
                        (void*)(uptr)WSTOPSIG(status));
        continue;
      }
      break;
    }
    suspended_threads_list_.Append(tid);
    return true;
  }
}

void ThreadSuspender::ResumeAllThreads() {
  for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++) {
    pid_t tid = suspended_threads_list_.GetThreadID(i);
    int pterrno;
    if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr),
                          &pterrno)) {
      VReport(2, "Detached from thread %d.\n", tid);
    } else {
      // Either the thread is dead, or we are already detached.
      // The latter case is possible, for instance, if this function was called
      // from a signal handler.
      VReport(1, "Could not detach from thread %d (errno %d).\n", tid, pterrno);
    }
  }
}

void ThreadSuspender::KillAllThreads() {
  for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++)
    internal_ptrace(PTRACE_KILL, suspended_threads_list_.GetThreadID(i),
                    nullptr, nullptr);
}

bool ThreadSuspender::SuspendAllThreads() {
  ThreadLister thread_lister(pid_);
  bool retry = true;
  InternalMmapVector<tid_t> threads;
  threads.reserve(128);
  for (int i = 0; i < 30 && retry; ++i) {
    retry = false;
    switch (thread_lister.ListThreads(&threads)) {
      case ThreadLister::Error:
        ResumeAllThreads();
        return false;
      case ThreadLister::Incomplete:
        retry = true;
        break;
      case ThreadLister::Ok:
        break;
    }
    // Suspending threads can race with thread creation, so keep re-listing
    // until no new threads show up (or we run out of attempts).
    for (tid_t tid : threads)
      if (SuspendThread(tid))
        retry = true;
  }
  return suspended_threads_list_.ThreadCount() > 0;
}

// Pointer to the ThreadSuspender instance for use in the signal handler.
static ThreadSuspender *thread_suspender_instance = nullptr;

// Synchronous signals that should not be blocked.
static const int kSyncSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,
                                    SIGXCPU, SIGXFSZ };

static void TracerThreadDieCallback() {
  // Generally a call to Die() in the tracer thread should be fatal to the
  // parent process as well, because they share the address space.
  // This really only works correctly if all the threads are suspended at this
  // point. So we correctly handle calls to Die() from within the callback, but
  // not those that happen before or after the callback. Hopefully there aren't
  // a lot of opportunities for that to happen...
  ThreadSuspender *inst = thread_suspender_instance;
  if (inst && stoptheworld_tracer_pid == internal_getpid()) {
    inst->KillAllThreads();
    thread_suspender_instance = nullptr;
  }
}

// Signal handler to wake up suspended threads when the tracer thread dies.
static void TracerThreadSignalHandler(int signum, __sanitizer_siginfo *siginfo,
                                      void *uctx) {
  SignalContext ctx(siginfo, uctx);
  Printf("Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n", signum,
         ctx.addr, ctx.pc, ctx.sp);
  ThreadSuspender *inst = thread_suspender_instance;
  if (inst) {
    if (signum == SIGABRT)
      inst->KillAllThreads();
    else
      inst->ResumeAllThreads();
    RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
    thread_suspender_instance = nullptr;
    atomic_store(&inst->arg->done, 1, memory_order_relaxed);
  }
  internal__exit((signum == SIGABRT) ? 1 : 2);
}

// Size of alternative stack for signal handlers in the tracer thread.
static const int kHandlerStackSize = 8192;

    278 static int TracerThread(void* argument) {
    279   TracerThreadArgument *tracer_thread_argument =
    280       (TracerThreadArgument *)argument;
    281 
    282   internal_prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
    283   // Check if parent is already dead.
    284   if (internal_getppid() != tracer_thread_argument->parent_pid)
    285     internal__exit(4);
    286 
    287   // Wait for the parent thread to finish preparations.
    288   tracer_thread_argument->mutex.Lock();
    289   tracer_thread_argument->mutex.Unlock();
    290 
    291   RAW_CHECK(AddDieCallback(TracerThreadDieCallback));
    292 
    293   ThreadSuspender thread_suspender(internal_getppid(), tracer_thread_argument);
    294   // Global pointer for the signal handler.
    295   thread_suspender_instance = &thread_suspender;
    296 
    297   // Alternate stack for signal handling.
    298   InternalMmapVector<char> handler_stack_memory(kHandlerStackSize);
    299   stack_t handler_stack;
    300   internal_memset(&handler_stack, 0, sizeof(handler_stack));
    301   handler_stack.ss_sp = handler_stack_memory.data();
    302   handler_stack.ss_size = kHandlerStackSize;
    303   internal_sigaltstack(&handler_stack, nullptr);
    304 
    305   // Install our handler for synchronous signals. Other signals should be
    306   // blocked by the mask we inherited from the parent thread.
    307   for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) {
    308     __sanitizer_sigaction act;
    309     internal_memset(&act, 0, sizeof(act));
    310     act.sigaction = TracerThreadSignalHandler;
    311     act.sa_flags = SA_ONSTACK | SA_SIGINFO;
    312     internal_sigaction_norestorer(kSyncSignals[i], &act, 0);
    313   }
    314 
    315   int exit_code = 0;
    316   if (!thread_suspender.SuspendAllThreads()) {
    317     VReport(1, "Failed suspending threads.\n");
    318     exit_code = 3;
    319   } else {
    320     tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
    321                                      tracer_thread_argument->callback_argument);
    322     thread_suspender.ResumeAllThreads();
    323     exit_code = 0;
    324   }
    325   RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
    326   thread_suspender_instance = nullptr;
    327   atomic_store(&tracer_thread_argument->done, 1, memory_order_relaxed);
    328   return exit_code;
    329 }
    330 
// Provides a temporary stack for internal_clone, with a PROT_NONE guard page
// below it to catch overflow. The stack grows down, so Bottom() (the highest
// address of the mapping) is what should be passed to clone().
class ScopedStackSpaceWithGuard {
 public:
  explicit ScopedStackSpaceWithGuard(uptr stack_size) {
    stack_size_ = stack_size;
    guard_size_ = GetPageSizeCached();
    // FIXME: Omitting MAP_STACK here works in current kernels but might break
    // in the future.
    guard_start_ = (uptr)MmapOrDie(stack_size_ + guard_size_,
                                   "ScopedStackWithGuard");
    CHECK(MprotectNoAccess((uptr)guard_start_, guard_size_));
  }
  ~ScopedStackSpaceWithGuard() {
    UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_);
  }
  void *Bottom() const {
    return (void *)(guard_start_ + stack_size_ + guard_size_);
  }

 private:
  uptr stack_size_;
  uptr guard_size_;
  uptr guard_start_;
};

// We have a limitation on the stack frame size, so some stuff had to be moved
// into globals.
static __sanitizer_sigset_t blocked_sigset;
static __sanitizer_sigset_t old_sigset;

class StopTheWorldScope {
 public:
  StopTheWorldScope() {
    // Make this process dumpable. Processes that are not dumpable cannot be
    // attached to.
    process_was_dumpable_ = internal_prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);
    if (!process_was_dumpable_)
      internal_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
  }

  ~StopTheWorldScope() {
    // Restore the dumpable flag.
    if (!process_was_dumpable_)
      internal_prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);
  }

 private:
  int process_was_dumpable_;
};

// When sanitizer output is being redirected to a file (e.g. by using
// log_path), the tracer should write to the parent's log instead of trying to
// open a new file. Alert the logging code to the fact that we have a tracer.
struct ScopedSetTracerPID {
  explicit ScopedSetTracerPID(uptr tracer_pid) {
    stoptheworld_tracer_pid = tracer_pid;
    stoptheworld_tracer_ppid = internal_getpid();
  }
  ~ScopedSetTracerPID() {
    stoptheworld_tracer_pid = 0;
    stoptheworld_tracer_ppid = 0;
  }
};

void StopTheWorld(StopTheWorldCallback callback, void *argument) {
  StopTheWorldScope in_stoptheworld;
  // Prepare the arguments for TracerThread.
  struct TracerThreadArgument tracer_thread_argument;
  tracer_thread_argument.callback = callback;
  tracer_thread_argument.callback_argument = argument;
  tracer_thread_argument.parent_pid = internal_getpid();
  atomic_store(&tracer_thread_argument.done, 0, memory_order_relaxed);
  const uptr kTracerStackSize = 2 * 1024 * 1024;
  ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);
  // Block the execution of TracerThread until after we have set ptrace
  // permissions.
  tracer_thread_argument.mutex.Lock();
  // Signal handling story.
  // We don't want async signals to be delivered to the tracer thread,
  // so we block all async signals before creating the thread. An async signal
  // handler can temporarily modify errno, which is shared with this thread.
  // We ought to use pthread_sigmask here, because sigprocmask has undefined
  // behavior in multithreaded programs. However, on Linux sigprocmask is
  // equivalent to pthread_sigmask with the exception that pthread_sigmask
  // does not allow blocking some signals used internally by the pthread
  // implementation. We are fine with blocking them here; we are really not
  // going to pthread_cancel the thread.
  // The tracer thread should not raise any synchronous signals. But in case it
  // does, we set up a special handler for sync signals that properly kills the
  // parent as well. Note: we don't pass CLONE_SIGHAND to clone, so handlers
  // in the tracer thread won't interfere with the user program. Double note:
  // if a user does something along the lines of 'kill -11 pid', that can kill
  // the process even if the user has set up a handler for SEGV.
  // Thing to watch out for: this code should not change the behavior of user
  // code in any observable way. In particular it should not override user
  // signal handlers.
  internal_sigfillset(&blocked_sigset);
  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++)
    internal_sigdelset(&blocked_sigset, kSyncSignals[i]);
  int rv = internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
  CHECK_EQ(rv, 0);
  uptr tracer_pid = internal_clone(
      TracerThread, tracer_stack.Bottom(),
      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,
      &tracer_thread_argument, nullptr /* parent_tidptr */,
      nullptr /* newtls */, nullptr /* child_tidptr */);
  internal_sigprocmask(SIG_SETMASK, &old_sigset, 0);
  int local_errno = 0;
  if (internal_iserror(tracer_pid, &local_errno)) {
    VReport(1, "Failed spawning a tracer thread (errno %d).\n", local_errno);
    tracer_thread_argument.mutex.Unlock();
  } else {
    ScopedSetTracerPID scoped_set_tracer_pid(tracer_pid);
    // On some systems we have to explicitly declare that we want to be traced
    // by the tracer thread.
    internal_prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
    // Allow the tracer thread to start.
    tracer_thread_argument.mutex.Unlock();
    // NOTE: errno is shared between this thread and the tracer thread.
    // internal_waitpid() may call syscall() which can access/spoil errno,
    // so we can't call it now. Instead we wait for the tracer thread to finish
    // using the spin loop below. The man page for sched_yield() says "In the
    // Linux implementation, sched_yield() always succeeds", so let's hope it
    // does not spoil errno. Note that this spin loop runs only for brief
    // periods before the tracer thread has suspended us and when it starts
    // unblocking threads.
    while (atomic_load(&tracer_thread_argument.done, memory_order_relaxed) == 0)
      sched_yield();
    // Now the tracer thread is about to exit and will not touch errno again;
    // wait for it.
    for (;;) {
      uptr waitpid_status = internal_waitpid(tracer_pid, nullptr, __WALL);
      if (!internal_iserror(waitpid_status, &local_errno))
        break;
      if (local_errno == EINTR)
        continue;
      VReport(1, "Waiting on the tracer thread failed (errno %d).\n",
              local_errno);
      break;
    }
  }
}

// Platform-specific methods from SuspendedThreadsList.
#if SANITIZER_ANDROID && defined(__arm__)
typedef pt_regs regs_struct;
#define REG_SP ARM_sp

#elif SANITIZER_LINUX && defined(__arm__)
typedef user_regs regs_struct;
#define REG_SP uregs[13]

#elif defined(__i386__) || defined(__x86_64__)
typedef user_regs_struct regs_struct;
#if defined(__i386__)
#define REG_SP esp
#else
#define REG_SP rsp
#endif

#elif defined(__powerpc__) || defined(__powerpc64__)
typedef pt_regs regs_struct;
#define REG_SP gpr[PT_R1]

#elif defined(__mips__)
typedef struct user regs_struct;
# if SANITIZER_ANDROID
#  define REG_SP regs[EF_R29]
# else
#  define REG_SP regs[EF_REG29]
# endif

#elif defined(__aarch64__)
typedef struct user_pt_regs regs_struct;
#define REG_SP sp
#define ARCH_IOVEC_FOR_GETREGSET

#elif defined(__s390__)
typedef _user_regs_struct regs_struct;
#define REG_SP gprs[15]
#define ARCH_IOVEC_FOR_GETREGSET

#else
#error "Unsupported architecture"
#endif // SANITIZER_ANDROID && defined(__arm__)
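
// For example, given the definitions above, `regs.REG_SP` reads `regs.rsp` on
// x86_64, `regs.sp` on aarch64, and `regs.gprs[15]` on s390.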

tid_t SuspendedThreadsListLinux::GetThreadID(uptr index) const {
  CHECK_LT(index, thread_ids_.size());
  return thread_ids_[index];
}

uptr SuspendedThreadsListLinux::ThreadCount() const {
  return thread_ids_.size();
}

bool SuspendedThreadsListLinux::ContainsTid(tid_t thread_id) const {
  for (uptr i = 0; i < thread_ids_.size(); i++) {
    if (thread_ids_[i] == thread_id) return true;
  }
  return false;
}

void SuspendedThreadsListLinux::Append(tid_t tid) {
  thread_ids_.push_back(tid);
}

PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
    uptr index, uptr *buffer, uptr *sp) const {
  pid_t tid = GetThreadID(index);
  regs_struct regs;
  int pterrno;
#ifdef ARCH_IOVEC_FOR_GETREGSET
  struct iovec regset_io;
  regset_io.iov_base = &regs;
  regset_io.iov_len = sizeof(regs_struct);
  bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGSET, tid,
                                (void*)NT_PRSTATUS, (void*)&regset_io),
                                &pterrno);
#else
  bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, nullptr,
                                &regs), &pterrno);
#endif
  if (isErr) {
    VReport(1, "Could not get registers from thread %d (errno %d).\n", tid,
            pterrno);
    // ESRCH means that the given thread is not suspended or is already dead.
    // Therefore it's unsafe to inspect its data (e.g. walk through the stack)
    // and we should notify the caller about this.
    return pterrno == ESRCH ? REGISTERS_UNAVAILABLE_FATAL
                            : REGISTERS_UNAVAILABLE;
  }

  *sp = regs.REG_SP;
  internal_memcpy(buffer, &regs, sizeof(regs));
  return REGISTERS_AVAILABLE;
}

uptr SuspendedThreadsListLinux::RegisterCount() const {
  return sizeof(regs_struct) / sizeof(uptr);
}
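
// A hypothetical usage sketch of the API implemented above (MyCallback and
// its body are invented for illustration; the callback runs in the tracer
// task while all other threads are suspended):
//
//   static void MyCallback(const SuspendedThreadsList &threads, void *arg) {
//     InternalMmapVector<uptr> regs(threads.RegisterCount());
//     for (uptr i = 0; i < threads.ThreadCount(); i++) {
//       uptr sp;
//       if (threads.GetRegistersAndSP(i, regs.data(), &sp) ==
//           REGISTERS_AVAILABLE) {
//         // e.g. scan the thread's stack upwards from sp.
//       }
//     }
//   }
//   ...
//   StopTheWorld(MyCallback, nullptr);
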
} // namespace __sanitizer

#endif  // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
        // || defined(__aarch64__) || defined(__powerpc64__)
        // || defined(__s390__) || defined(__i386__) || defined(__arm__))