//===-- tsan_platform_mac.cc ----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Mac-specific code.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_flags.h"

#include <mach/mach.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <sched.h>

namespace __tsan {

#if !SANITIZER_GO
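// Lazily allocates a block for *dst: if *dst is still null, mmap `size` bytes
// and publish the block with a compare-and-swap; a losing racer munmaps its
// block and returns the winner's pointer. Uses only atomics and raw syscalls,
// so it is safe to call from signal handlers.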
static void *SignalSafeGetOrAllocate(uptr *dst, uptr size) {
  atomic_uintptr_t *a = (atomic_uintptr_t *)dst;
  void *val = (void *)atomic_load_relaxed(a);
  atomic_signal_fence(memory_order_acquire);  // Turns the previous load into
                                              // acquire wrt signals.
  if (UNLIKELY(val == nullptr)) {
    val = (void *)internal_mmap(nullptr, size, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANON, -1, 0);
    CHECK(val);
    void *cmp = nullptr;
    if (!atomic_compare_exchange_strong(a, (uintptr_t *)&cmp, (uintptr_t)val,
                                        memory_order_acq_rel)) {
      internal_munmap(val, size);
      val = cmp;
    }
  }
  return val;
}

// On OS X, accessing TLVs via __thread or manually by using pthread_key_* is
// problematic, because there are several places where interceptors are called
// when TLVs are not accessible (early process startup, thread cleanup, ...).
// The following provides a "poor man's TLV" implementation, where we use the
// shadow memory of the pointer returned by pthread_self() to store a pointer to
// the ThreadState object. The main thread's ThreadState is stored separately
// in a static variable, because we need to access it even before the
// shadow memory is set up.
static uptr main_thread_identity = 0;
ALIGNED(64) static char main_thread_state[sizeof(ThreadState)];

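// Returns the address used as the key for the fake-TLS lookup: pthread_self()
// for ordinary threads, or nullptr for the main thread, whose ThreadState is
// kept in main_thread_state rather than in mmap'ed storage.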
ThreadState **cur_thread_location() {
  ThreadState **thread_identity = (ThreadState **)pthread_self();
  return ((uptr)thread_identity == main_thread_identity) ? nullptr
                                                         : thread_identity;
}

ThreadState *cur_thread() {
  ThreadState **thr_state_loc = cur_thread_location();
  if (thr_state_loc == nullptr || main_thread_identity == 0) {
    return (ThreadState *)&main_thread_state;
  }
  ThreadState **fake_tls = (ThreadState **)MemToShadow((uptr)thr_state_loc);
  ThreadState *thr = (ThreadState *)SignalSafeGetOrAllocate(
      (uptr *)fake_tls, sizeof(ThreadState));
  return thr;
}

// TODO(kuba.brecka): This is not async-signal-safe. In particular, we call
// munmap first and then clear `fake_tls`; if we receive a signal in between,
// the handler will try to access the already-unmapped ThreadState.
void cur_thread_finalize() {
  ThreadState **thr_state_loc = cur_thread_location();
  if (thr_state_loc == nullptr) {
    // Calling dispatch_main() or xpc_main() actually invokes pthread_exit to
    // exit the main thread. Let's keep the main thread's ThreadState.
    return;
  }
  ThreadState **fake_tls = (ThreadState **)MemToShadow((uptr)thr_state_loc);
  internal_munmap(*fake_tls, sizeof(ThreadState));
  *fake_tls = nullptr;
}
#endif

void FlushShadowMemory() {
}

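// Walks the VM regions overlapping [start, end) via vm_region_64 and sums the
// resident and dirtied page counts reported by the kernel, returning both as
// byte totals in *res and *dirty.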
static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) {
  vm_address_t address = start;
  vm_address_t end_address = end;
  uptr resident_pages = 0;
  uptr dirty_pages = 0;
  while (address < end_address) {
    vm_size_t vm_region_size;
    mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT;
    vm_region_extended_info_data_t vm_region_info;
    mach_port_t object_name;
    kern_return_t ret = vm_region_64(
        mach_task_self(), &address, &vm_region_size, VM_REGION_EXTENDED_INFO,
        (vm_region_info_t)&vm_region_info, &count, &object_name);
    if (ret != KERN_SUCCESS) break;

    resident_pages += vm_region_info.pages_resident;
    dirty_pages += vm_region_info.pages_dirtied;

    address += vm_region_size;
  }
  *res = resident_pages * GetPageSizeCached();
  *dirty = dirty_pages * GetPageSizeCached();
}

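// Formats a human-readable memory profile into `buf`: resident/dirty sizes of
// the shadow, metadata, trace and application regions, plus stack depot and
// thread counts.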
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
  uptr shadow_res, shadow_dirty;
  uptr meta_res, meta_dirty;
  uptr trace_res, trace_dirty;
  RegionMemUsage(ShadowBeg(), ShadowEnd(), &shadow_res, &shadow_dirty);
  RegionMemUsage(MetaShadowBeg(), MetaShadowEnd(), &meta_res, &meta_dirty);
  RegionMemUsage(TraceMemBeg(), TraceMemEnd(), &trace_res, &trace_dirty);

#if !SANITIZER_GO
  uptr low_res, low_dirty;
  uptr high_res, high_dirty;
  uptr heap_res, heap_dirty;
  RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &low_res, &low_dirty);
  RegionMemUsage(HiAppMemBeg(), HiAppMemEnd(), &high_res, &high_dirty);
  RegionMemUsage(HeapMemBeg(), HeapMemEnd(), &heap_res, &heap_dirty);
#else  // !SANITIZER_GO
  uptr app_res, app_dirty;
  RegionMemUsage(AppMemBeg(), AppMemEnd(), &app_res, &app_dirty);
#endif

  StackDepotStats *stacks = StackDepotGetStats();
  internal_snprintf(buf, buf_size,
    "shadow   (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
    "meta     (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
    "traces   (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
#if !SANITIZER_GO
    "low app  (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
    "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
    "heap     (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
#else  // !SANITIZER_GO
    "app      (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
#endif
    "stacks: %zd unique IDs, %zd kB allocated\n"
    "threads: %zd total, %zd live\n"
    "------------------------------\n",
    ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
    MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
    TraceMemBeg(), TraceMemEnd(), trace_res / 1024, trace_dirty / 1024,
#if !SANITIZER_GO
    LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024,
    HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
    HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024,
#else  // !SANITIZER_GO
    AppMemBeg(), AppMemEnd(), app_res / 1024, app_dirty / 1024,
#endif
    stacks->n_uniq_ids, stacks->allocated / 1024,
    nthread, nlive);
}

#if !SANITIZER_GO
void InitializeShadowMemoryPlatform() { }

// On OS X, GCD worker threads are created without a call to pthread_create. We
// need to properly register these threads with ThreadCreate and ThreadStart.
// These threads don't have a parent thread, as they are created "spuriously".
// We're using a libpthread API that notifies us about a newly created thread.
// The `thread == pthread_self()` check indicates this is actually a worker
// thread. If it's just a regular thread, this hook is called on the parent
// thread.
typedef void (*pthread_introspection_hook_t)(unsigned int event,
                                             pthread_t thread, void *addr,
                                             size_t size);
extern "C" pthread_introspection_hook_t pthread_introspection_hook_install(
    pthread_introspection_hook_t hook);
static const uptr PTHREAD_INTROSPECTION_THREAD_CREATE = 1;
static const uptr PTHREAD_INTROSPECTION_THREAD_TERMINATE = 3;
static pthread_introspection_hook_t prev_pthread_introspection_hook;
static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
                                          void *addr, size_t size) {
  if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
    if (thread == pthread_self()) {
      // The current thread is a newly created GCD worker thread.
      ThreadState *thr = cur_thread();
      Processor *proc = ProcCreate();
      ProcWire(proc, thr);
      ThreadState *parent_thread_state = nullptr;  // No parent.
      int tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
      CHECK_NE(tid, 0);
      ThreadStart(thr, tid, GetTid(), /*workerthread*/ true);
    }
  } else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
    if (thread == pthread_self()) {
      ThreadState *thr = cur_thread();
      if (thr->tctx) {
        DestroyThreadState();
      }
    }
  }

  if (prev_pthread_introspection_hook != nullptr)
    prev_pthread_introspection_hook(event, thread, addr, size);
}
#endif

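// On arm64 the shadow layout is computed from the fixed Mapping::kHiAppMemEnd,
// so refuse to run if the kernel reports a different maximum user VM address
// than the one this build assumes.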
void InitializePlatformEarly() {
#if defined(__aarch64__)
  uptr max_vm = GetMaxUserVirtualAddress() + 1;
  if (max_vm != Mapping::kHiAppMemEnd) {
    Printf("ThreadSanitizer: unsupported vm address limit %p, expected %p.\n",
           max_vm, Mapping::kHiAppMemEnd);
    Die();
  }
#endif
}

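// Since macOS Mojave, setjmp mangles the stack pointer it saves by XOR'ing it
// with a per-process key kept in pthread TLS slot 0x7. Export the key (fetched
// below via pthread_getspecific) so TSan's longjmp handling can demangle the
// saved stack pointer.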
static const uptr kPthreadSetjmpXorKeySlot = 0x7;
extern "C" uptr __tsan_darwin_setjmp_xor_key = 0;

void InitializePlatform() {
  DisableCoreDumperIfNecessary();
#if !SANITIZER_GO
  CheckAndProtect();

  CHECK_EQ(main_thread_identity, 0);
  main_thread_identity = (uptr)pthread_self();

  prev_pthread_introspection_hook =
      pthread_introspection_hook_install(&my_pthread_introspection_hook);
#endif

  if (GetMacosVersion() >= MACOS_VERSION_MOJAVE) {
    __tsan_darwin_setjmp_xor_key =
        (uptr)pthread_getspecific(kPthreadSetjmpXorKeySlot);
  }
}

#if !SANITIZER_GO
void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
  // The pointer to the ThreadState object is stored in the shadow memory
  // of the tls.
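  // Imitate writes over the whole TLS range except the word whose shadow holds
  // that pointer: an imitated write there would clobber the fake-TLS slot set
  // up by cur_thread().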
  uptr tls_end = tls_addr + tls_size;
  ThreadState **thr_state_loc = cur_thread_location();
  if (thr_state_loc == nullptr) {
    MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, tls_size);
  } else {
    uptr thr_state_start = (uptr)thr_state_loc;
    uptr thr_state_end = thr_state_start + sizeof(uptr);
    CHECK_GE(thr_state_start, tls_addr);
    CHECK_LE(thr_state_start, tls_addr + tls_size);
    CHECK_GE(thr_state_end, tls_addr);
    CHECK_LE(thr_state_end, tls_addr + tls_size);
    MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr,
                            thr_state_start - tls_addr);
    MemoryRangeImitateWrite(thr, /*pc=*/2, thr_state_end,
                            tls_end - thr_state_end);
  }
}
#endif

#if !SANITIZER_GO
// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
    void *abstime), void *c, void *m, void *abstime,
    void(*cleanup)(void *arg), void *arg) {
  // pthread_cleanup_push/pop are a hardcore macro mess; we can't intercept
  // them, nor call them, without including pthread.h.
  int res;
  pthread_cleanup_push(cleanup, arg);
  res = fn(c, m, abstime);
  pthread_cleanup_pop(0);
  return res;
}
#endif

}  // namespace __tsan

#endif  // SANITIZER_MAC