#include "hwasan_thread.h"

#include "hwasan.h"
#include "hwasan_interface_internal.h"
#include "hwasan_mapping.h"
#include "hwasan_poisoning.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

namespace __hwasan {

// Produce a non-zero 32-bit seed for the per-thread tag PRNG.
// Prefers the system entropy source (non-blocking GetRandom); if that fails,
// falls back to mixing the clock with the current frame address. Loops until
// the seed is non-zero, since 0 would make the xorshift generator degenerate.
static u32 RandomSeed() {
  u32 seed;
  do {
    if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&seed), sizeof(seed),
                            /*blocking=*/false))) {
      seed = static_cast<u32>(
          (NanoTime() >> 12) ^
          (reinterpret_cast<uptr>(__builtin_frame_address(0)) >> 4));
    }
  } while (!seed);
  return seed;
}

// Initialize this thread's tag-generation state. With random_tags enabled the
// state is seeded from entropy; otherwise it is derived deterministically from
// the thread's unique id (reproducible tag sequences).
void Thread::InitRandomState() {
  random_state_ = flags()->random_tags ? RandomSeed() : unique_id_;
  random_state_inited_ = true;

  // Push a random number of zeros onto the ring buffer so that the first stack
  // tag base will be random.
  for (tag_t i = 0, e = GenerateRandomTag(); i != e; ++i)
    stack_allocations_->push(0);
}

// One-time setup of a Thread object: assigns the process-wide unique id,
// allocates the heap-allocations ring buffer (if enabled), and (except on
// Fuchsia) wires up the stack ring buffer and stack/TLS bounds.
void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
                  const InitState *state) {
  CHECK_EQ(0, unique_id_);  // try to catch bad stack reuse
  CHECK_EQ(0, stack_top_);
  CHECK_EQ(0, stack_bottom_);

  static atomic_uint64_t unique_id;
  unique_id_ = atomic_fetch_add(&unique_id, 1, memory_order_relaxed);

  if (auto sz = flags()->heap_history_size)
    heap_allocations_ = HeapAllocationsRingBuffer::New(sz);

#if !SANITIZER_FUCHSIA
  // Do not initialize the stack ring buffer just yet on Fuchsia. Threads will
  // be initialized before we enter the thread itself, so we will instead call
  // this later.
  InitStackRingBuffer(stack_buffer_start, stack_buffer_size);
#endif
  InitStackAndTls(state);
}

// Places the stack-allocations ring buffer into the thread-long TLS slot.
// NOTE: ordering matters here — the placement-new below is what registers
// (this) as the current thread, and GetCurrentThread() is checked right after.
void Thread::InitStackRingBuffer(uptr stack_buffer_start,
                                 uptr stack_buffer_size) {
  HwasanTSDThreadInit();  // Only needed with interceptors.
  uptr *ThreadLong = GetCurrentThreadLongPtr();
  // The following implicitly sets (this) as the current thread.
  stack_allocations_ = new (ThreadLong)
      StackAllocationsRingBuffer((void *)stack_buffer_start, stack_buffer_size);
  // Check that it worked.
  CHECK_EQ(GetCurrentThread(), this);

  // ScopedTaggingDisable needs GetCurrentThread to be set up.
  ScopedTaggingDisabler disabler;

  // Sanity-check the recorded stack bounds against a live stack address and
  // the application shadow mapping (only if the bounds are already known).
  if (stack_bottom_) {
    int local;
    CHECK(AddrIsInStack((uptr)&local));
    CHECK(MemIsApp(stack_bottom_));
    CHECK(MemIsApp(stack_top_ - 1));
  }

  if (flags()->verbose_threads) {
    if (IsMainThread()) {
      Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n",
             sizeof(Thread), heap_allocations_->SizeInBytes(),
             stack_allocations_->size() * sizeof(uptr));
    }
    Print("Creating : ");
  }
}

// Re-tag the thread's stack and TLS shadow to 0 so stale tags from this
// thread's lifetime cannot cause false reports after the memory is reused.
void Thread::ClearShadowForThreadStackAndTLS() {
  if (stack_top_ != stack_bottom_)
    TagMemory(stack_bottom_, stack_top_ - stack_bottom_, 0);
  if (tls_begin_ != tls_end_)
    TagMemory(tls_begin_, tls_end_ - tls_begin_, 0);
}

// Tear down per-thread state: flush the allocator cache, clear shadow,
// release the heap ring buffer, and unregister this thread from TLS.
void Thread::Destroy() {
  if (flags()->verbose_threads)
    Print("Destroying: ");
  AllocatorSwallowThreadLocalCache(allocator_cache());
  ClearShadowForThreadStackAndTLS();
  if (heap_allocations_)
    heap_allocations_->Delete();
  DTLS_Destroy();
  // Unregister this as the current thread.
  // Instrumented code can not run on this thread from this point onwards, but
  // malloc/free can still be served. Glibc may call free() very late, after all
  // TSD destructors are done.
  CHECK_EQ(GetCurrentThread(), this);
  *GetCurrentThreadLongPtr() = 0;
}

// Debug dump of this thread's id, address, stack range and TLS range,
// prefixed by the caller-supplied string (e.g. "Creating : ").
void Thread::Print(const char *Prefix) {
  Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix, unique_id_,
         (void *)this, stack_bottom(), stack_top(),
         stack_top() - stack_bottom(), tls_begin(), tls_end());
}

// Marsaglia xorshift step: cheap full-period (for non-zero state) 32-bit PRNG
// used to advance the per-thread random tag state.
static u32 xorshift(u32 state) {
  state ^= state << 13;
  state ^= state >> 17;
  state ^= state << 5;
  return state;
}

// Generate a (pseudo-)random non-zero tag.
tag_t Thread::GenerateRandomTag(uptr num_bits) {
  DCHECK_GT(num_bits, 0);
  if (tagging_disabled_)
    return 0;
  tag_t tag;
  const uptr tag_mask = (1ULL << num_bits) - 1;
  do {
    if (flags()->random_tags) {
      // Consume num_bits at a time from a buffered xorshift output,
      // refilling (and advancing the PRNG state) when the buffer runs dry.
      if (!random_buffer_) {
        EnsureRandomStateInited();
        random_buffer_ = random_state_ = xorshift(random_state_);
      }
      CHECK(random_buffer_);
      tag = random_buffer_ & tag_mask;
      random_buffer_ >>= num_bits;
    } else {
      // Deterministic mode: tags are a simple counter sequence.
      EnsureRandomStateInited();
      random_state_ += 1;
      tag = random_state_ & tag_mask;
    }
  } while (!tag);  // Tag 0 is reserved; retry until a non-zero tag comes up.
  return tag;
}

}  // namespace __hwasan