/*	$NetBSD: asan.h,v 1.12 2022/09/13 09:39:49 riastradh Exp $	*/

/*
 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the KASAN subsystem of the NetBSD kernel.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _AMD64_ASAN_H_
#define _AMD64_ASAN_H_

#include <sys/ksyms.h>

#include <uvm/uvm.h>

#include <amd64/pmap.h>
#include <amd64/vmparam.h>

#include <x86/bootspace.h>

#include <machine/pmap_private.h>

#ifdef __HAVE_PCPU_AREA
#error "PCPU area not allowed with KASAN"
#endif
#ifdef __HAVE_DIRECT_MAP
#error "DMAP not allowed with KASAN"
#endif

#define __MD_VIRTUAL_SHIFT	47	/* 48bit address space, cut half */
#define __MD_KERNMEM_BASE	0xFFFF800000000000 /* kern mem base address */

#define __MD_SHADOW_SIZE	(1ULL << (__MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_MD_SHADOW_START	(VA_SIGN_NEG((L4_SLOT_KASAN * NBPD_L4)))
#define KASAN_MD_SHADOW_END	(KASAN_MD_SHADOW_START + __MD_SHADOW_SIZE)

/* -------------------------------------------------------------------------- */

/*
 * Early mapping, used to map just the stack at boot time. We rely on the fact
 * that VA = PA + KERNBASE.
64 1.6 maxv */ 65 1.6 maxv 66 1.1 maxv static bool __md_early __read_mostly = true; 67 1.1 maxv static uint8_t __md_earlypages[8 * PAGE_SIZE] __aligned(PAGE_SIZE); 68 1.1 maxv static size_t __md_earlytaken = 0; 69 1.1 maxv 70 1.1 maxv static paddr_t 71 1.1 maxv __md_early_palloc(void) 72 1.1 maxv { 73 1.1 maxv paddr_t ret; 74 1.1 maxv 75 1.1 maxv KASSERT(__md_earlytaken < 8); 76 1.1 maxv 77 1.1 maxv ret = (paddr_t)(&__md_earlypages[0] + __md_earlytaken * PAGE_SIZE); 78 1.1 maxv __md_earlytaken++; 79 1.1 maxv 80 1.1 maxv ret -= KERNBASE; 81 1.1 maxv 82 1.1 maxv return ret; 83 1.1 maxv } 84 1.1 maxv 85 1.6 maxv static void 86 1.6 maxv __md_early_shadow_map_page(vaddr_t va) 87 1.1 maxv { 88 1.6 maxv extern struct bootspace bootspace; 89 1.6 maxv const pt_entry_t pteflags = PTE_W | pmap_pg_nx | PTE_P; 90 1.6 maxv pt_entry_t *pdir = (pt_entry_t *)bootspace.pdir; 91 1.1 maxv paddr_t pa; 92 1.1 maxv 93 1.6 maxv if (!pmap_valid_entry(pdir[pl4_pi(va)])) { 94 1.6 maxv pa = __md_early_palloc(); 95 1.6 maxv pdir[pl4_pi(va)] = pa | pteflags; 96 1.6 maxv } 97 1.6 maxv pdir = (pt_entry_t *)((pdir[pl4_pi(va)] & PTE_FRAME) + KERNBASE); 98 1.6 maxv 99 1.6 maxv if (!pmap_valid_entry(pdir[pl3_pi(va)])) { 100 1.6 maxv pa = __md_early_palloc(); 101 1.6 maxv pdir[pl3_pi(va)] = pa | pteflags; 102 1.6 maxv } 103 1.6 maxv pdir = (pt_entry_t *)((pdir[pl3_pi(va)] & PTE_FRAME) + KERNBASE); 104 1.6 maxv 105 1.6 maxv if (!pmap_valid_entry(pdir[pl2_pi(va)])) { 106 1.6 maxv pa = __md_early_palloc(); 107 1.6 maxv pdir[pl2_pi(va)] = pa | pteflags; 108 1.6 maxv } 109 1.6 maxv pdir = (pt_entry_t *)((pdir[pl2_pi(va)] & PTE_FRAME) + KERNBASE); 110 1.6 maxv 111 1.6 maxv if (!pmap_valid_entry(pdir[pl1_pi(va)])) { 112 1.1 maxv pa = __md_early_palloc(); 113 1.6 maxv pdir[pl1_pi(va)] = pa | pteflags | pmap_pg_g; 114 1.6 maxv } 115 1.6 maxv } 116 1.6 maxv 117 1.6 maxv /* -------------------------------------------------------------------------- */ 118 1.6 maxv 119 1.6 maxv static inline int8_t * 120 1.6 maxv 
kasan_md_addr_to_shad(const void *addr) 121 1.6 maxv { 122 1.6 maxv vaddr_t va = (vaddr_t)addr; 123 1.6 maxv return (int8_t *)(KASAN_MD_SHADOW_START + 124 1.7 maxv ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT)); 125 1.6 maxv } 126 1.1 maxv 127 1.6 maxv static inline bool 128 1.6 maxv kasan_md_unsupported(vaddr_t addr) 129 1.6 maxv { 130 1.6 maxv return (addr >= (vaddr_t)PTE_BASE && 131 1.6 maxv addr < ((vaddr_t)PTE_BASE + NBPD_L4)); 132 1.6 maxv } 133 1.6 maxv 134 1.6 maxv static paddr_t 135 1.6 maxv __md_palloc(void) 136 1.6 maxv { 137 1.4 maxv /* The page is zeroed. */ 138 1.6 maxv return pmap_get_physpage(); 139 1.1 maxv } 140 1.1 maxv 141 1.4 maxv static inline paddr_t 142 1.4 maxv __md_palloc_large(void) 143 1.4 maxv { 144 1.4 maxv struct pglist pglist; 145 1.4 maxv int ret; 146 1.4 maxv 147 1.4 maxv if (!uvm.page_init_done) 148 1.4 maxv return 0; 149 1.4 maxv 150 1.4 maxv ret = uvm_pglistalloc(NBPD_L2, 0, ~0UL, NBPD_L2, 0, 151 1.4 maxv &pglist, 1, 0); 152 1.4 maxv if (ret != 0) 153 1.4 maxv return 0; 154 1.4 maxv 155 1.4 maxv /* The page may not be zeroed. 
*/ 156 1.4 maxv return VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist)); 157 1.4 maxv } 158 1.4 maxv 159 1.1 maxv static void 160 1.1 maxv kasan_md_shadow_map_page(vaddr_t va) 161 1.1 maxv { 162 1.4 maxv const pt_entry_t pteflags = PTE_W | pmap_pg_nx | PTE_P; 163 1.1 maxv paddr_t pa; 164 1.1 maxv 165 1.6 maxv if (__predict_false(__md_early)) { 166 1.6 maxv __md_early_shadow_map_page(va); 167 1.6 maxv return; 168 1.6 maxv } 169 1.6 maxv 170 1.1 maxv if (!pmap_valid_entry(L4_BASE[pl4_i(va)])) { 171 1.1 maxv pa = __md_palloc(); 172 1.4 maxv L4_BASE[pl4_i(va)] = pa | pteflags; 173 1.1 maxv } 174 1.1 maxv if (!pmap_valid_entry(L3_BASE[pl3_i(va)])) { 175 1.1 maxv pa = __md_palloc(); 176 1.4 maxv L3_BASE[pl3_i(va)] = pa | pteflags; 177 1.1 maxv } 178 1.1 maxv if (!pmap_valid_entry(L2_BASE[pl2_i(va)])) { 179 1.4 maxv if ((pa = __md_palloc_large()) != 0) { 180 1.4 maxv L2_BASE[pl2_i(va)] = pa | pteflags | PTE_PS | 181 1.4 maxv pmap_pg_g; 182 1.4 maxv __insn_barrier(); 183 1.4 maxv __builtin_memset((void *)va, 0, NBPD_L2); 184 1.4 maxv return; 185 1.4 maxv } 186 1.1 maxv pa = __md_palloc(); 187 1.4 maxv L2_BASE[pl2_i(va)] = pa | pteflags; 188 1.4 maxv } else if (L2_BASE[pl2_i(va)] & PTE_PS) { 189 1.4 maxv return; 190 1.1 maxv } 191 1.1 maxv if (!pmap_valid_entry(L1_BASE[pl1_i(va)])) { 192 1.1 maxv pa = __md_palloc(); 193 1.4 maxv L1_BASE[pl1_i(va)] = pa | pteflags | pmap_pg_g; 194 1.1 maxv } 195 1.1 maxv } 196 1.1 maxv 197 1.1 maxv /* 198 1.1 maxv * Map only the current stack. We will map the rest in kasan_init. 199 1.1 maxv */ 200 1.1 maxv static void 201 1.1 maxv kasan_md_early_init(void *stack) 202 1.1 maxv { 203 1.1 maxv kasan_shadow_map(stack, USPACE); 204 1.1 maxv __md_early = false; 205 1.1 maxv } 206 1.1 maxv 207 1.1 maxv /* 208 1.1 maxv * Create the shadow mapping. We don't create the 'User' area, because we 209 1.1 maxv * exclude it from the monitoring. The 'Main' area is created dynamically 210 1.1 maxv * in pmap_growkernel. 
211 1.1 maxv */ 212 1.1 maxv static void 213 1.1 maxv kasan_md_init(void) 214 1.1 maxv { 215 1.1 maxv extern struct bootspace bootspace; 216 1.1 maxv size_t i; 217 1.1 maxv 218 1.1 maxv CTASSERT((__MD_SHADOW_SIZE / NBPD_L4) == NL4_SLOT_KASAN); 219 1.1 maxv 220 1.1 maxv /* Kernel. */ 221 1.1 maxv for (i = 0; i < BTSPACE_NSEGS; i++) { 222 1.1 maxv if (bootspace.segs[i].type == BTSEG_NONE) { 223 1.1 maxv continue; 224 1.1 maxv } 225 1.1 maxv kasan_shadow_map((void *)bootspace.segs[i].va, 226 1.1 maxv bootspace.segs[i].sz); 227 1.1 maxv } 228 1.1 maxv 229 1.1 maxv /* Boot region. */ 230 1.1 maxv kasan_shadow_map((void *)bootspace.boot.va, bootspace.boot.sz); 231 1.1 maxv 232 1.1 maxv /* Module map. */ 233 1.1 maxv kasan_shadow_map((void *)bootspace.smodule, 234 1.1 maxv (size_t)(bootspace.emodule - bootspace.smodule)); 235 1.1 maxv 236 1.1 maxv /* The bootstrap spare va. */ 237 1.1 maxv kasan_shadow_map((void *)bootspace.spareva, PAGE_SIZE); 238 1.1 maxv } 239 1.1 maxv 240 1.1 maxv static inline bool 241 1.1 maxv __md_unwind_end(const char *name) 242 1.1 maxv { 243 1.1 maxv if (!strcmp(name, "syscall") || 244 1.2 maxv !strcmp(name, "alltraps") || 245 1.1 maxv !strcmp(name, "handle_syscall") || 246 1.2 maxv !strncmp(name, "Xtrap", 5) || 247 1.1 maxv !strncmp(name, "Xintr", 5) || 248 1.1 maxv !strncmp(name, "Xhandle", 7) || 249 1.1 maxv !strncmp(name, "Xresume", 7) || 250 1.1 maxv !strncmp(name, "Xstray", 6) || 251 1.1 maxv !strncmp(name, "Xhold", 5) || 252 1.1 maxv !strncmp(name, "Xrecurse", 8) || 253 1.1 maxv !strcmp(name, "Xdoreti") || 254 1.1 maxv !strncmp(name, "Xsoft", 5)) { 255 1.1 maxv return true; 256 1.1 maxv } 257 1.1 maxv 258 1.1 maxv return false; 259 1.1 maxv } 260 1.1 maxv 261 1.1 maxv static void 262 1.1 maxv kasan_md_unwind(void) 263 1.1 maxv { 264 1.1 maxv uint64_t *rbp, rip; 265 1.1 maxv const char *mod; 266 1.1 maxv const char *sym; 267 1.1 maxv size_t nsym; 268 1.1 maxv int error; 269 1.1 maxv 270 1.1 maxv rbp = (uint64_t 
*)__builtin_frame_address(0); 271 1.1 maxv nsym = 0; 272 1.1 maxv 273 1.1 maxv while (1) { 274 1.1 maxv /* 8(%rbp) contains the saved %rip. */ 275 1.1 maxv rip = *(rbp + 1); 276 1.1 maxv 277 1.1 maxv if (rip < KERNBASE) { 278 1.1 maxv break; 279 1.1 maxv } 280 1.1 maxv error = ksyms_getname(&mod, &sym, (vaddr_t)rip, KSYMS_PROC); 281 1.1 maxv if (error) { 282 1.1 maxv break; 283 1.1 maxv } 284 1.1 maxv printf("#%zu %p in %s <%s>\n", nsym, (void *)rip, sym, mod); 285 1.1 maxv if (__md_unwind_end(sym)) { 286 1.1 maxv break; 287 1.1 maxv } 288 1.1 maxv 289 1.1 maxv rbp = (uint64_t *)*(rbp); 290 1.1 maxv if (rbp == 0) { 291 1.1 maxv break; 292 1.1 maxv } 293 1.1 maxv nsym++; 294 1.1 maxv 295 1.1 maxv if (nsym >= 15) { 296 1.1 maxv break; 297 1.1 maxv } 298 1.1 maxv } 299 1.1 maxv } 300 1.12 riastrad 301 1.12 riastrad #endif /* _AMD64_ASAN_H_ */ 302