//===-- sanitizer_fuchsia.cc ----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and other sanitizer
// run-time libraries and implements Fuchsia-specific functions from
// sanitizer_common.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"

#include <limits.h>
#include <pthread.h>
#include <stdlib.h>
#include <threads.h>  // for thrd_current
#include <unistd.h>
#include <zircon/errors.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>

namespace __sanitizer {

void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }

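// Zircon has no direct sched_yield counterpart; a nanosleep with an
// already-expired deadline is used here as the closest substitute.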
uptr internal_sched_yield() {
  zx_status_t status = _zx_nanosleep(0);
  CHECK_EQ(status, ZX_OK);
  return 0;  // The interface requires a return value, but callers ignore it.
}

static void internal_nanosleep(zx_time_t ns) {
  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ns));
  CHECK_EQ(status, ZX_OK);
}

unsigned int internal_sleep(unsigned int seconds) {
  internal_nanosleep(ZX_SEC(seconds));
  return 0;
}

u64 NanoTime() { return _zx_clock_get(ZX_CLOCK_UTC); }

u64 MonotonicNanoTime() { return _zx_clock_get(ZX_CLOCK_MONOTONIC); }

uptr internal_getpid() {
  zx_info_handle_basic_t info;
  zx_status_t status =
      _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
                          sizeof(info), NULL, NULL);
  CHECK_EQ(status, ZX_OK);
  uptr pid = static_cast<uptr>(info.koid);
  CHECK_EQ(pid, info.koid);
  return pid;
}

uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }

tid_t GetTid() { return GetThreadSelf(); }

void Abort() { abort(); }

int Atexit(void (*function)(void)) { return atexit(function); }

void SleepForSeconds(int seconds) { internal_sleep(seconds); }

void SleepForMillis(int millis) { internal_nanosleep(ZX_MSEC(millis)); }

void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  void *base;
  size_t size;
  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
  CHECK_EQ(pthread_attr_destroy(&attr), 0);

  *stack_bottom = reinterpret_cast<uptr>(base);
  *stack_top = *stack_bottom + size;
}

void InitializePlatformEarly() {}
void MaybeReexec() {}
void CheckASLR() {}
void CheckMPROTECT() {}
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}
void InitTlsSize() {}

void PrintModuleMap() {}

bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }

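// BlockingMutex is a futex-based mutex with three states: unlocked, locked
// with no waiters, and locked with (possibly) sleeping waiters.  Tracking
// the third state lets Unlock skip the futex wake syscall when no thread
// can be waiting.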
enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };

BlockingMutex::BlockingMutex() {
  // NOTE!  It's important that this use internal_memset, because plain
  // memset might be intercepted (e.g., actually be __asan_memset).
  // Defining this so the compiler initializes each field, e.g.:
  //   BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
  // might result in the compiler generating a call to memset, which would
  // have the same problem.
  internal_memset(this, 0, sizeof(*this));
}

void BlockingMutex::Lock() {
  CHECK_EQ(owner_, 0);
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
    return;
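  // Contended path: advertise a sleeper before blocking.  This may mark the
  // mutex MtxSleeping even when no thread actually sleeps, in which case
  // Unlock issues one spurious (harmless) futex wake.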
  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
    zx_status_t status =
        _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m), MtxSleeping,
                       ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
    if (status != ZX_ERR_BAD_STATE)  // Normal race.
      CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::Unlock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
  CHECK_NE(v, MtxUnlocked);
  if (v == MtxSleeping) {
    zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
    CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::CheckLocked() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}

uptr GetPageSize() { return PAGE_SIZE; }

uptr GetMmapGranularity() { return PAGE_SIZE; }

sanitizer_shadow_bounds_t ShadowBounds;

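// __sanitizer_shadow_bounds() is provided by the Fuchsia runtime; its result
// is cached in ShadowBounds above so other code can consult the bounds
// without repeating the call.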
uptr GetMaxUserVirtualAddress() {
  ShadowBounds = __sanitizer_shadow_bounds();
  return ShadowBounds.memory_limit - 1;
}

uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }

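// Zircon has no anonymous mmap: memory comes from a VMO mapped into the root
// VMAR.  The VMO handle can be closed as soon as it is mapped, since the
// mapping itself keeps the underlying memory alive.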
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                  bool raw_report, bool die_for_nomem) {
  size = RoundUpTo(size, PAGE_SIZE);

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
                              raw_report);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
  uintptr_t addr;
  status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   vmo, 0, size, &addr);
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
                              raw_report);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}

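// Reserving an address range is done by allocating a child VMAR: the kernel
// holds the address space while it remains unmapped, and later Map calls
// place mappings inside it at VMAR-relative offsets with ZX_VM_SPECIFIC.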
uptr ReservedAddressRange::Init(uptr init_size, const char *name,
                                uptr fixed_addr) {
  init_size = RoundUpTo(init_size, PAGE_SIZE);
  DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
  uintptr_t base;
  zx_handle_t vmar;
  zx_status_t status =
      _zx_vmar_allocate(
          _zx_vmar_root_self(),
          ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC,
          0, init_size, &vmar, &base);
  if (status != ZX_OK)
    ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
  base_ = reinterpret_cast<void *>(base);
  size_ = init_size;
  name_ = name;
  os_handle_ = vmar;

  return reinterpret_cast<uptr>(base_);
}

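// Maps map_size bytes at fixed_addr inside the given VMAR.  ZX_VM_SPECIFIC
// interprets the mapping offset relative to the VMAR base, so the absolute
// address is first converted into an offset within the reservation.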
static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
                             void *base, uptr range_size, const char *name,
                             bool die_for_nomem) {
  uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
  map_size = RoundUpTo(map_size, PAGE_SIZE);
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status);
    return 0;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name));
  // The requested mapping must fit within the reserved range.
  DCHECK_LE(map_size + offset, range_size);
  uintptr_t addr;

  status =
      _zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                   offset, vmo, 0, map_size, &addr);
  _zx_handle_close(vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {
      ReportMmapFailureAndDie(map_size, name, "zx_vmar_map", status);
    }
    return 0;
  }
  IncreaseTotalMmap(map_size);
  return addr;
}

uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, size_,
                          name_, false);
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, size_,
                          name_, true);
}

void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
  if (!addr || !size) return;
  size = RoundUpTo(size, PAGE_SIZE);

  zx_status_t status =
      _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
  if (status != ZX_OK) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }

  DecreaseTotalMmap(size);
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  CHECK_LE(size, size_);
  const zx_handle_t vmar = static_cast<zx_handle_t>(os_handle_);
  if (addr == reinterpret_cast<uptr>(base_)) {
    if (size == size_) {
      // Destroying the vmar effectively unmaps the whole mapping.
      _zx_vmar_destroy(vmar);
      _zx_handle_close(vmar);
      os_handle_ = static_cast<uptr>(ZX_HANDLE_INVALID);
      DecreaseTotalMmap(size);
      return;
    }
  } else {
    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
  }
  // Partial unmapping does not affect the fact that the initial range is still
  // reserved, and the resulting unmapped memory can't be reused.
  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar);
}

// This should never be called.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  UNIMPLEMENTED();
}

void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK_GE(size, PAGE_SIZE);
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?

  // Map a larger size to get a chunk of address space big enough that
  // it surely contains an aligned region of the requested size.  Then
  // overwrite the aligned middle portion with a mapping from the
  // beginning of the VMO, and unmap the excess before and after.
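  // For example, a 64 KiB allocation with 64 KiB alignment maps 128 KiB;
  // any 128 KiB window necessarily contains a 64 KiB-aligned 64 KiB subrange.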
  size_t map_size = size + alignment;
  uintptr_t addr;
  status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   vmo, 0, map_size, &addr);
  if (status == ZX_OK) {
    uintptr_t map_addr = addr;
    uintptr_t map_end = map_addr + map_size;
    addr = RoundUpTo(map_addr, alignment);
    uintptr_t end = addr + size;
    if (addr != map_addr) {
      zx_info_vmar_t info;
      status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
                                   sizeof(info), NULL, NULL);
      if (status == ZX_OK) {
        uintptr_t new_addr;
        status = _zx_vmar_map(
            _zx_vmar_root_self(),
            ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
            addr - info.base, vmo, 0, size, &new_addr);
        if (status == ZX_OK) CHECK_EQ(new_addr, addr);
      }
    }
    if (status == ZX_OK && addr != map_addr)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
    if (status == ZX_OK && end != map_end)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
  }
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void UnmapOrDie(void *addr, uptr size) {
  UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
}

// This is used on the shadow mapping, which cannot be changed.
// Zircon doesn't have anything like MADV_DONTNEED.
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}

void DumpProcessMap() {
  // TODO(mcgrathr): write it
  return;
}

bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  // TODO(mcgrathr): Figure out a better way.
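  // Writing the range into a throwaway VMO probes readability: the kernel
  // reports a failure status for a faulting source buffer rather than
  // crashing the process.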
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
    status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size);
    _zx_handle_close(vmo);
  }
  return status == ZX_OK;
}

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {}

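// Sanitizer runtimes on Fuchsia do not read ambient files; named
// configuration data is delivered as a VMO, which is mapped read-only here.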
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  zx_handle_t vmo;
  zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
  if (status == ZX_OK) {
    uint64_t vmo_size;
    status = _zx_vmo_get_size(vmo, &vmo_size);
    if (status == ZX_OK) {
      if (vmo_size < max_len) max_len = vmo_size;
      size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
      uintptr_t addr;
      status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0,
                            map_size, &addr);
      if (status == ZX_OK) {
        *buff = reinterpret_cast<char *>(addr);
        *buff_size = map_size;
        *read_len = max_len;
      }
    }
    _zx_handle_close(vmo);
  }
  if (status != ZX_OK && errno_p) *errno_p = status;
  return status == ZX_OK;
}

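// The log is record-oriented: each __sanitizer_log_write call produces one
// entry, so output is accumulated in a per-thread buffer and flushed a whole
// line at a time.  An incomplete trailing line stays buffered until a later
// call completes it (or the buffer fills up).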
void RawWrite(const char *buffer) {
  constexpr size_t size = 128;
  static _Thread_local char line[size];
  static _Thread_local size_t lastLineEnd = 0;
  static _Thread_local size_t cur = 0;

  while (*buffer) {
    if (cur >= size) {
      if (lastLineEnd == 0)
        lastLineEnd = size;
      __sanitizer_log_write(line, lastLineEnd);
      internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
      cur = cur - lastLineEnd;
      lastLineEnd = 0;
    }
    if (*buffer == '\n')
      lastLineEnd = cur + 1;
    line[cur++] = *buffer++;
  }
  // Flush all complete lines before returning.
  if (lastLineEnd != 0) {
    __sanitizer_log_write(line, lastLineEnd);
    internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
    cur = cur - lastLineEnd;
    lastLineEnd = 0;
  }
}

void CatastrophicErrorWrite(const char *buffer, uptr length) {
  __sanitizer_log_write(buffer, length);
}

char **StoredArgv;
char **StoredEnviron;

char **GetArgv() { return StoredArgv; }
char **GetEnviron() { return StoredEnviron; }

const char *GetEnv(const char *name) {
  if (StoredEnviron) {
    uptr NameLen = internal_strlen(name);
    for (char **Env = StoredEnviron; *Env != 0; Env++) {
      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
        return (*Env) + NameLen + 1;
    }
  }
  return nullptr;
}

uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
  const char *argv0 = "<UNKNOWN>";
  if (StoredArgv && StoredArgv[0]) {
    argv0 = StoredArgv[0];
  }
  internal_strncpy(buf, argv0, buf_len);
  // internal_strncpy does not NUL-terminate when argv0 fills the buffer.
  if (buf_len > 0) buf[buf_len - 1] = '\0';
  return internal_strlen(buf);
}

uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

uptr MainThreadStackBase, MainThreadStackSize;

bool GetRandom(void *buffer, uptr length, bool blocking) {
  CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
  _zx_cprng_draw(buffer, length);
  return true;
}

u32 GetNumberOfCPUs() { return zx_system_get_num_cpus(); }

uptr GetRSS() { UNIMPLEMENTED(); }

}  // namespace __sanitizer

using namespace __sanitizer;  // NOLINT

extern "C" {
void __sanitizer_startup_hook(int argc, char **argv, char **envp,
                              void *stack_base, size_t stack_size) {
  __sanitizer::StoredArgv = argv;
  __sanitizer::StoredEnviron = envp;
  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
  __sanitizer::MainThreadStackSize = stack_size;
}

void __sanitizer_set_report_path(const char *path) {
  // Handle the initialization code in each sanitizer, but no other calls.
  // This setting is never consulted on Fuchsia.
  DCHECK_EQ(path, common_flags()->log_path);
}

void __sanitizer_set_report_fd(void *fd) {
  UNREACHABLE("not available on Fuchsia");
}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA