1 1.1 kamil //===-- safestack.cc ------------------------------------------------------===// 2 1.1 kamil // 3 1.1 kamil // The LLVM Compiler Infrastructure 4 1.1 kamil // 5 1.1 kamil // This file is distributed under the University of Illinois Open Source 6 1.1 kamil // License. See LICENSE.TXT for details. 7 1.1 kamil // 8 1.1 kamil //===----------------------------------------------------------------------===// 9 1.1 kamil // 10 1.1 kamil // This file implements the runtime support for the safe stack protection 11 1.1 kamil // mechanism. The runtime manages allocation/deallocation of the unsafe stack 12 1.1 kamil // for the main thread, as well as all pthreads that are created/destroyed 13 1.1 kamil // during program execution. 14 1.1 kamil // 15 1.1 kamil //===----------------------------------------------------------------------===// 16 1.1 kamil 17 1.1 kamil #include <errno.h> 18 1.1 kamil #include <limits.h> 19 1.1 kamil #include <pthread.h> 20 1.1 kamil #include <stddef.h> 21 1.1 kamil #include <stdint.h> 22 1.1 kamil #include <unistd.h> 23 1.1 kamil #include <stdlib.h> 24 1.1 kamil #include <sys/resource.h> 25 1.1 kamil #include <sys/types.h> 26 1.1 kamil #if !defined(__NetBSD__) 27 1.1 kamil #include <sys/user.h> 28 1.1 kamil #endif 29 1.1 kamil 30 1.1 kamil #include "interception/interception.h" 31 1.1 kamil #include "sanitizer_common/sanitizer_common.h" 32 1.1 kamil 33 1.1 kamil // TODO: The runtime library does not currently protect the safe stack beyond 34 1.1 kamil // relying on the system-enforced ASLR. The protection of the (safe) stack can 35 1.1 kamil // be provided by three alternative features: 36 1.1 kamil // 37 1.1 kamil // 1) Protection via hardware segmentation on x86-32 and some x86-64 38 1.1 kamil // architectures: the (safe) stack segment (implicitly accessed via the %ss 39 1.1 kamil // segment register) can be separated from the data segment (implicitly 40 1.1 kamil // accessed via the %ds segment register). 
Dereferencing a pointer to the safe 41 1.1 kamil // segment would result in a segmentation fault. 42 1.1 kamil // 43 1.1 kamil // 2) Protection via software fault isolation: memory writes that are not meant 44 1.1 kamil // to access the safe stack can be prevented from doing so through runtime 45 1.1 kamil // instrumentation. One way to do it is to allocate the safe stack(s) in the 46 1.1 kamil // upper half of the userspace and bitmask the corresponding upper bit of the 47 1.1 kamil // memory addresses of memory writes that are not meant to access the safe 48 1.1 kamil // stack. 49 1.1 kamil // 50 1.1 kamil // 3) Protection via information hiding on 64 bit architectures: the location 51 1.1 kamil // of the safe stack(s) can be randomized through secure mechanisms, and the 52 1.1 kamil // leakage of the stack pointer can be prevented. Currently, libc can leak the 53 1.1 kamil // stack pointer in several ways (e.g. in longjmp, signal handling, user-level 54 1.1 kamil // context switching related functions, etc.). These can be fixed in libc and 55 1.1 kamil // in other low-level libraries, by either eliminating the escaping/dumping of 56 1.1 kamil // the stack pointer (i.e., %rsp) when that's possible, or by using 57 1.1 kamil // encryption/PTR_MANGLE (XOR-ing the dumped stack pointer with another secret 58 1.1 kamil // we control and protect better, as is already done for setjmp in glibc.) 59 1.1 kamil // Furthermore, a static machine code level verifier can be ran after code 60 1.1 kamil // generation to make sure that the stack pointer is never written to memory, 61 1.1 kamil // or if it is, its written on the safe stack. 62 1.1 kamil // 63 1.1 kamil // Finally, while the Unsafe Stack pointer is currently stored in a thread 64 1.1 kamil // local variable, with libc support it could be stored in the TCB (thread 65 1.1 kamil // control block) as well, eliminating another level of indirection and making 66 1.1 kamil // such accesses faster. 
// Alternatively, dedicating a separate register for
// storing it would also be possible.

/// Minimum stack alignment for the unsafe stack, in bytes.
const unsigned kStackAlign = 16;

/// Default size of the unsafe stack. This value is only used if the stack
/// size rlimit is set to infinity (see __safestack_init).
const unsigned kDefaultUnsafeStackSize = 0x2800000;

/// Runtime page size obtained through sysconf in __safestack_init; used to
/// validate guard sizes in the pthread_create interceptor.
static unsigned pageSize;

// TODO: To make accessing the unsafe stack pointer faster, we plan to
// eventually store it directly in the thread control block data structure on
// platforms where this structure is pointed to by %fs or %gs. This is exactly
// the same mechanism as currently being used by the traditional stack
// protector pass to store the stack guard (see getStackCookieLocation()
// function above). Doing so requires changing the tcbhead_t struct in glibc
// on Linux and tcb struct in libc on FreeBSD.
//
// For now, store it in a thread-local variable.
extern "C" {
// Current unsafe stack pointer of this thread. Accessed directly by
// compiler-instrumented code, so it needs default visibility and C linkage.
__attribute__((visibility(
    "default"))) __thread void *__safestack_unsafe_stack_ptr = nullptr;
}

// Per-thread unsafe stack information. It's not frequently accessed, so
// it can be kept out of the tcb in normal thread-local variables.
95 1.1 kamil static __thread void *unsafe_stack_start = nullptr; 96 1.1 kamil static __thread size_t unsafe_stack_size = 0; 97 1.1 kamil static __thread size_t unsafe_stack_guard = 0; 98 1.1 kamil 99 1.1 kamil using namespace __sanitizer; 100 1.1 kamil 101 1.1 kamil static inline void *unsafe_stack_alloc(size_t size, size_t guard) { 102 1.1 kamil CHECK_GE(size + guard, size); 103 1.1 kamil void *addr = MmapOrDie(size + guard, "unsafe_stack_alloc"); 104 1.1 kamil MprotectNoAccess((uptr)addr, (uptr)guard); 105 1.1 kamil return (char *)addr + guard; 106 1.1 kamil } 107 1.1 kamil 108 1.1 kamil static inline void unsafe_stack_setup(void *start, size_t size, size_t guard) { 109 1.1 kamil CHECK_GE((char *)start + size, (char *)start); 110 1.1 kamil CHECK_GE((char *)start + guard, (char *)start); 111 1.1 kamil void *stack_ptr = (char *)start + size; 112 1.1 kamil CHECK_EQ((((size_t)stack_ptr) & (kStackAlign - 1)), 0); 113 1.1 kamil 114 1.1 kamil __safestack_unsafe_stack_ptr = stack_ptr; 115 1.1 kamil unsafe_stack_start = start; 116 1.1 kamil unsafe_stack_size = size; 117 1.1 kamil unsafe_stack_guard = guard; 118 1.1 kamil } 119 1.1 kamil 120 1.1 kamil /// Thread data for the cleanup handler 121 1.1 kamil static pthread_key_t thread_cleanup_key; 122 1.1 kamil 123 1.1 kamil /// Safe stack per-thread information passed to the thread_start function 124 1.1 kamil struct tinfo { 125 1.1 kamil void *(*start_routine)(void *); 126 1.1 kamil void *start_routine_arg; 127 1.1 kamil 128 1.1 kamil void *unsafe_stack_start; 129 1.1 kamil size_t unsafe_stack_size; 130 1.1 kamil size_t unsafe_stack_guard; 131 1.1 kamil }; 132 1.1 kamil 133 1.1 kamil /// Wrap the thread function in order to deallocate the unsafe stack when the 134 1.1 kamil /// thread terminates by returning from its main function. 
135 1.1 kamil static void *thread_start(void *arg) { 136 1.1 kamil struct tinfo *tinfo = (struct tinfo *)arg; 137 1.1 kamil 138 1.1 kamil void *(*start_routine)(void *) = tinfo->start_routine; 139 1.1 kamil void *start_routine_arg = tinfo->start_routine_arg; 140 1.1 kamil 141 1.1 kamil // Setup the unsafe stack; this will destroy tinfo content 142 1.1 kamil unsafe_stack_setup(tinfo->unsafe_stack_start, tinfo->unsafe_stack_size, 143 1.1 kamil tinfo->unsafe_stack_guard); 144 1.1 kamil 145 1.1 kamil // Make sure out thread-specific destructor will be called 146 1.1 kamil pthread_setspecific(thread_cleanup_key, (void *)1); 147 1.1 kamil 148 1.1 kamil return start_routine(start_routine_arg); 149 1.1 kamil } 150 1.1 kamil 151 1.1 kamil /// Linked list used to store exiting threads stack/thread information. 152 1.1 kamil struct thread_stack_ll { 153 1.1 kamil struct thread_stack_ll *next; 154 1.1 kamil void *stack_base; 155 1.1 kamil size_t size; 156 1.1 kamil pid_t pid; 157 1.1 kamil tid_t tid; 158 1.1 kamil }; 159 1.1 kamil 160 1.1 kamil /// Linked list of unsafe stacks for threads that are exiting. We delay 161 1.1 kamil /// unmapping them until the thread exits. 162 1.1 kamil static thread_stack_ll *thread_stacks = nullptr; 163 1.1 kamil static pthread_mutex_t thread_stacks_mutex = PTHREAD_MUTEX_INITIALIZER; 164 1.1 kamil 165 1.1 kamil /// Thread-specific data destructor. We want to free the unsafe stack only after 166 1.1 kamil /// this thread is terminated. libc can call functions in safestack-instrumented 167 1.1 kamil /// code (like free) after thread-specific data destructors have run. 168 1.1 kamil static void thread_cleanup_handler(void *_iter) { 169 1.1 kamil CHECK_NE(unsafe_stack_start, nullptr); 170 1.1 kamil pthread_setspecific(thread_cleanup_key, NULL); 171 1.1 kamil 172 1.1 kamil pthread_mutex_lock(&thread_stacks_mutex); 173 1.1 kamil // Temporary list to hold the previous threads stacks so we don't hold the 174 1.1 kamil // thread_stacks_mutex for long. 
175 1.1 kamil thread_stack_ll *temp_stacks = thread_stacks; 176 1.1 kamil thread_stacks = nullptr; 177 1.1 kamil pthread_mutex_unlock(&thread_stacks_mutex); 178 1.1 kamil 179 1.1 kamil pid_t pid = getpid(); 180 1.1 kamil tid_t tid = GetTid(); 181 1.1 kamil 182 1.1 kamil // Free stacks for dead threads 183 1.1 kamil thread_stack_ll **stackp = &temp_stacks; 184 1.1 kamil while (*stackp) { 185 1.1 kamil thread_stack_ll *stack = *stackp; 186 1.1 kamil int error; 187 1.1 kamil if (stack->pid != pid || 188 1.1 kamil (internal_iserror(TgKill(stack->pid, stack->tid, 0), &error) && 189 1.1 kamil error == ESRCH)) { 190 1.1 kamil UnmapOrDie(stack->stack_base, stack->size); 191 1.1 kamil *stackp = stack->next; 192 1.1 kamil free(stack); 193 1.1 kamil } else 194 1.1 kamil stackp = &stack->next; 195 1.1 kamil } 196 1.1 kamil 197 1.1 kamil thread_stack_ll *cur_stack = 198 1.1 kamil (thread_stack_ll *)malloc(sizeof(thread_stack_ll)); 199 1.1 kamil cur_stack->stack_base = (char *)unsafe_stack_start - unsafe_stack_guard; 200 1.1 kamil cur_stack->size = unsafe_stack_size + unsafe_stack_guard; 201 1.1 kamil cur_stack->pid = pid; 202 1.1 kamil cur_stack->tid = tid; 203 1.1 kamil 204 1.1 kamil pthread_mutex_lock(&thread_stacks_mutex); 205 1.1 kamil // Merge thread_stacks with the current thread's stack and any remaining 206 1.1 kamil // temp_stacks 207 1.1 kamil *stackp = thread_stacks; 208 1.1 kamil cur_stack->next = temp_stacks; 209 1.1 kamil thread_stacks = cur_stack; 210 1.1 kamil pthread_mutex_unlock(&thread_stacks_mutex); 211 1.1 kamil 212 1.1 kamil unsafe_stack_start = nullptr; 213 1.1 kamil } 214 1.1 kamil 215 1.1 kamil static void EnsureInterceptorsInitialized(); 216 1.1 kamil 217 1.1 kamil /// Intercept thread creation operation to allocate and setup the unsafe stack 218 1.1 kamil INTERCEPTOR(int, pthread_create, pthread_t *thread, 219 1.1 kamil const pthread_attr_t *attr, 220 1.1 kamil void *(*start_routine)(void*), void *arg) { 221 1.1 kamil EnsureInterceptorsInitialized(); 
222 1.1 kamil size_t size = 0; 223 1.1 kamil size_t guard = 0; 224 1.1 kamil 225 1.1 kamil if (attr) { 226 1.1 kamil pthread_attr_getstacksize(attr, &size); 227 1.1 kamil pthread_attr_getguardsize(attr, &guard); 228 1.1 kamil } else { 229 1.1 kamil // get pthread default stack size 230 1.1 kamil pthread_attr_t tmpattr; 231 1.1 kamil pthread_attr_init(&tmpattr); 232 1.1 kamil pthread_attr_getstacksize(&tmpattr, &size); 233 1.1 kamil pthread_attr_getguardsize(&tmpattr, &guard); 234 1.1 kamil pthread_attr_destroy(&tmpattr); 235 1.1 kamil } 236 1.1 kamil 237 1.1 kamil CHECK_NE(size, 0); 238 1.1 kamil CHECK_EQ((size & (kStackAlign - 1)), 0); 239 1.1 kamil CHECK_EQ((guard & (pageSize - 1)), 0); 240 1.1 kamil 241 1.1 kamil void *addr = unsafe_stack_alloc(size, guard); 242 1.1 kamil struct tinfo *tinfo = 243 1.1 kamil (struct tinfo *)(((char *)addr) + size - sizeof(struct tinfo)); 244 1.1 kamil tinfo->start_routine = start_routine; 245 1.1 kamil tinfo->start_routine_arg = arg; 246 1.1 kamil tinfo->unsafe_stack_start = addr; 247 1.1 kamil tinfo->unsafe_stack_size = size; 248 1.1 kamil tinfo->unsafe_stack_guard = guard; 249 1.1 kamil 250 1.1 kamil return REAL(pthread_create)(thread, attr, thread_start, tinfo); 251 1.1 kamil } 252 1.1 kamil 253 1.1 kamil static BlockingMutex interceptor_init_lock(LINKER_INITIALIZED); 254 1.1 kamil static bool interceptors_inited = false; 255 1.1 kamil 256 1.1 kamil static void EnsureInterceptorsInitialized() { 257 1.1 kamil BlockingMutexLock lock(&interceptor_init_lock); 258 1.1 kamil if (interceptors_inited) return; 259 1.1 kamil 260 1.1 kamil // Initialize pthread interceptors for thread allocation 261 1.1 kamil INTERCEPT_FUNCTION(pthread_create); 262 1.1 kamil 263 1.1 kamil interceptors_inited = true; 264 1.1 kamil } 265 1.1 kamil 266 1.1 kamil extern "C" __attribute__((visibility("default"))) 267 1.1 kamil #if !SANITIZER_CAN_USE_PREINIT_ARRAY 268 1.1 kamil // On ELF platforms, the constructor is invoked using .preinit_array (see below) 
269 1.1 kamil __attribute__((constructor(0))) 270 1.1 kamil #endif 271 1.1 kamil void __safestack_init() { 272 1.1 kamil // Determine the stack size for the main thread. 273 1.1 kamil size_t size = kDefaultUnsafeStackSize; 274 1.1 kamil size_t guard = 4096; 275 1.1 kamil 276 1.1 kamil struct rlimit limit; 277 1.1 kamil if (getrlimit(RLIMIT_STACK, &limit) == 0 && limit.rlim_cur != RLIM_INFINITY) 278 1.1 kamil size = limit.rlim_cur; 279 1.1 kamil 280 1.1 kamil // Allocate unsafe stack for main thread 281 1.1 kamil void *addr = unsafe_stack_alloc(size, guard); 282 1.1 kamil 283 1.1 kamil unsafe_stack_setup(addr, size, guard); 284 1.1 kamil pageSize = sysconf(_SC_PAGESIZE); 285 1.1 kamil 286 1.1 kamil // Setup the cleanup handler 287 1.1 kamil pthread_key_create(&thread_cleanup_key, thread_cleanup_handler); 288 1.1 kamil } 289 1.1 kamil 290 1.1 kamil #if SANITIZER_CAN_USE_PREINIT_ARRAY 291 1.1 kamil // On ELF platforms, run safestack initialization before any other constructors. 292 1.1 kamil // On other platforms we use the constructor attribute to arrange to run our 293 1.1 kamil // initialization early. 
294 1.1 kamil extern "C" { 295 1.1 kamil __attribute__((section(".preinit_array"), 296 1.1 kamil used)) void (*__safestack_preinit)(void) = __safestack_init; 297 1.1 kamil } 298 1.1 kamil #endif 299 1.1 kamil 300 1.1 kamil extern "C" 301 1.1 kamil __attribute__((visibility("default"))) void *__get_unsafe_stack_bottom() { 302 1.1 kamil return unsafe_stack_start; 303 1.1 kamil } 304 1.1 kamil 305 1.1 kamil extern "C" 306 1.1 kamil __attribute__((visibility("default"))) void *__get_unsafe_stack_top() { 307 1.1 kamil return (char*)unsafe_stack_start + unsafe_stack_size; 308 1.1 kamil } 309 1.1 kamil 310 1.1 kamil extern "C" 311 1.1 kamil __attribute__((visibility("default"))) void *__get_unsafe_stack_start() { 312 1.1 kamil return unsafe_stack_start; 313 1.1 kamil } 314 1.1 kamil 315 1.1 kamil extern "C" 316 1.1 kamil __attribute__((visibility("default"))) void *__get_unsafe_stack_ptr() { 317 1.1 kamil return __safestack_unsafe_stack_ptr; 318 1.1 kamil } 319