/*	$NetBSD: asan.h,v 1.12 2022/09/13 09:39:49 riastradh Exp $	*/

/*
 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the KASAN subsystem of the NetBSD kernel.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef	_AMD64_ASAN_H_
#define	_AMD64_ASAN_H_

#include <sys/ksyms.h>

#include <uvm/uvm.h>

#include <amd64/pmap.h>
#include <amd64/vmparam.h>

#include <x86/bootspace.h>

#include <machine/pmap_private.h>

#ifdef __HAVE_PCPU_AREA
#error "PCPU area not allowed with KASAN"
#endif
#ifdef __HAVE_DIRECT_MAP
#error "DMAP not allowed with KASAN"
#endif

#define __MD_VIRTUAL_SHIFT	47	/* 48-bit address space, cut in half */
#define __MD_KERNMEM_BASE	0xFFFF800000000000 /* kernel memory base address */

#define __MD_SHADOW_SIZE	(1ULL << (__MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_MD_SHADOW_START	(VA_SIGN_NEG((L4_SLOT_KASAN * NBPD_L4)))
#define KASAN_MD_SHADOW_END	(KASAN_MD_SHADOW_START + __MD_SHADOW_SIZE)
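
/*
 * Worked example, assuming the usual 8:1 KASAN scaling
 * (KASAN_SHADOW_SCALE_SHIFT == 3): 2^47 bytes of kernel address space
 * are covered by 2^(47-3) = 2^44 bytes (16TB) of shadow, carved out of
 * the L4 slots reserved for KASAN.
 */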

/* -------------------------------------------------------------------------- */

/*
 * Early mapping, used to map just the stack at boot time. We rely on the fact
 * that VA = PA + KERNBASE.
 */

static bool __md_early __read_mostly = true;
static uint8_t __md_earlypages[8 * PAGE_SIZE] __aligned(PAGE_SIZE);
static size_t __md_earlytaken = 0;

static paddr_t
__md_early_palloc(void)
{
	paddr_t ret;

	KASSERT(__md_earlytaken < 8);

	ret = (paddr_t)(&__md_earlypages[0] + __md_earlytaken * PAGE_SIZE);
	__md_earlytaken++;

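	/*
	 * The early pages live in the kernel image, so convert the buffer's
	 * VA to its PA with the boot-time rule VA = PA + KERNBASE.
	 */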
	ret -= KERNBASE;

	return ret;
}

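/*
 * Walk the boot-time page tables rooted at bootspace.pdir, allocating
 * missing L4/L3/L2/L1 levels for 'va' from the early pool. Intermediate
 * levels are dereferenced through VA = PA + KERNBASE, since the recursive
 * mapping is not available this early.
 */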
static void
__md_early_shadow_map_page(vaddr_t va)
{
	extern struct bootspace bootspace;
	const pt_entry_t pteflags = PTE_W | pmap_pg_nx | PTE_P;
	pt_entry_t *pdir = (pt_entry_t *)bootspace.pdir;
	paddr_t pa;

	if (!pmap_valid_entry(pdir[pl4_pi(va)])) {
		pa = __md_early_palloc();
		pdir[pl4_pi(va)] = pa | pteflags;
	}
	pdir = (pt_entry_t *)((pdir[pl4_pi(va)] & PTE_FRAME) + KERNBASE);

	if (!pmap_valid_entry(pdir[pl3_pi(va)])) {
		pa = __md_early_palloc();
		pdir[pl3_pi(va)] = pa | pteflags;
	}
	pdir = (pt_entry_t *)((pdir[pl3_pi(va)] & PTE_FRAME) + KERNBASE);

	if (!pmap_valid_entry(pdir[pl2_pi(va)])) {
		pa = __md_early_palloc();
		pdir[pl2_pi(va)] = pa | pteflags;
	}
	pdir = (pt_entry_t *)((pdir[pl2_pi(va)] & PTE_FRAME) + KERNBASE);

	if (!pmap_valid_entry(pdir[pl1_pi(va)])) {
		pa = __md_early_palloc();
		pdir[pl1_pi(va)] = pa | pteflags | pmap_pg_g;
	}
}

/* -------------------------------------------------------------------------- */

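/*
 * Compute the shadow address for 'addr': one shadow byte covers
 * 2^KASAN_SHADOW_SCALE_SHIFT bytes of kernel memory, so for example
 * kasan_md_addr_to_shad((void *)__MD_KERNMEM_BASE) is exactly
 * KASAN_MD_SHADOW_START.
 */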
static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;
	return (int8_t *)(KASAN_MD_SHADOW_START +
	    ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}

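/*
 * The recursive PTE area (one L4 slot's worth starting at PTE_BASE) has
 * no shadow; accesses there are skipped rather than checked.
 */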
static inline bool
kasan_md_unsupported(vaddr_t addr)
{
	return (addr >= (vaddr_t)PTE_BASE &&
	    addr < ((vaddr_t)PTE_BASE + NBPD_L4));
}

static paddr_t
__md_palloc(void)
{
	/* The page is zeroed. */
	return pmap_get_physpage();
}

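/*
 * Try to grab a physically contiguous, 2MB-aligned chunk (NBPD_L2 bytes)
 * so the shadow can be backed by a superpage. Returns 0 if the VM system
 * is not initialized yet or no such chunk is available.
 */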
static inline paddr_t
__md_palloc_large(void)
{
	struct pglist pglist;
	int ret;

	if (!uvm.page_init_done)
		return 0;

	ret = uvm_pglistalloc(NBPD_L2, 0, ~0UL, NBPD_L2, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return 0;

	/* The page may not be zeroed. */
	return VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
}

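/*
 * Map one shadow page at 'va'. Past early boot, walk the live page tables
 * through the recursive slots (L4_BASE/L3_BASE/L2_BASE/L1_BASE).
 */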
static void
kasan_md_shadow_map_page(vaddr_t va)
{
	const pt_entry_t pteflags = PTE_W | pmap_pg_nx | PTE_P;
	paddr_t pa;

	if (__predict_false(__md_early)) {
		__md_early_shadow_map_page(va);
		return;
	}

	if (!pmap_valid_entry(L4_BASE[pl4_i(va)])) {
		pa = __md_palloc();
		L4_BASE[pl4_i(va)] = pa | pteflags;
	}
	if (!pmap_valid_entry(L3_BASE[pl3_i(va)])) {
		pa = __md_palloc();
		L3_BASE[pl3_i(va)] = pa | pteflags;
	}
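	/*
	 * Prefer a 2MB superpage for the shadow; the large chunk is not
	 * pre-zeroed, so clear it once the mapping is visible.
	 */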
	if (!pmap_valid_entry(L2_BASE[pl2_i(va)])) {
		if ((pa = __md_palloc_large()) != 0) {
			L2_BASE[pl2_i(va)] = pa | pteflags | PTE_PS |
			    pmap_pg_g;
			__insn_barrier();
			__builtin_memset((void *)va, 0, NBPD_L2);
			return;
		}
		pa = __md_palloc();
		L2_BASE[pl2_i(va)] = pa | pteflags;
	} else if (L2_BASE[pl2_i(va)] & PTE_PS) {
		return;
	}
	if (!pmap_valid_entry(L1_BASE[pl1_i(va)])) {
		pa = __md_palloc();
		L1_BASE[pl1_i(va)] = pa | pteflags | pmap_pg_g;
	}
}

/*
 * Map only the current stack. We will map the rest in kasan_init.
 */
static void
kasan_md_early_init(void *stack)
{
	kasan_shadow_map(stack, USPACE);
	__md_early = false;
}

/*
 * Create the shadow mapping. We don't create the 'User' area, because we
 * exclude it from monitoring. The 'Main' area is created dynamically
 * in pmap_growkernel.
 */
static void
kasan_md_init(void)
{
	extern struct bootspace bootspace;
	size_t i;

	CTASSERT((__MD_SHADOW_SIZE / NBPD_L4) == NL4_SLOT_KASAN);

	/* Kernel. */
	for (i = 0; i < BTSPACE_NSEGS; i++) {
		if (bootspace.segs[i].type == BTSEG_NONE) {
			continue;
		}
		kasan_shadow_map((void *)bootspace.segs[i].va,
		    bootspace.segs[i].sz);
	}

	/* Boot region. */
	kasan_shadow_map((void *)bootspace.boot.va, bootspace.boot.sz);

	/* Module map. */
	kasan_shadow_map((void *)bootspace.smodule,
	    (size_t)(bootspace.emodule - bootspace.smodule));

	/* The bootstrap spare va. */
	kasan_shadow_map((void *)bootspace.spareva, PAGE_SIZE);
}

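/*
 * Symbols at which the unwinder stops: these are the amd64 assembly
 * entry points (syscall, trap and interrupt stubs), above which there is
 * no C frame chain to follow.
 */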
static inline bool
__md_unwind_end(const char *name)
{
	if (!strcmp(name, "syscall") ||
	    !strcmp(name, "alltraps") ||
	    !strcmp(name, "handle_syscall") ||
	    !strncmp(name, "Xtrap", 5) ||
	    !strncmp(name, "Xintr", 5) ||
	    !strncmp(name, "Xhandle", 7) ||
	    !strncmp(name, "Xresume", 7) ||
	    !strncmp(name, "Xstray", 6) ||
	    !strncmp(name, "Xhold", 5) ||
	    !strncmp(name, "Xrecurse", 8) ||
	    !strcmp(name, "Xdoreti") ||
	    !strncmp(name, "Xsoft", 5)) {
		return true;
	}

	return false;
}

static void
kasan_md_unwind(void)
{
	uint64_t *rbp, rip;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	rbp = (uint64_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/* 8(%rbp) contains the saved %rip. */
		rip = *(rbp + 1);

		if (rip < KERNBASE) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)rip, KSYMS_PROC);
		if (error) {
			break;
		}
		printf("#%zu %p in %s <%s>\n", nsym, (void *)rip, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		rbp = (uint64_t *)*(rbp);
		if (rbp == 0) {
			break;
		}
		nsym++;

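		/* Cap the backtrace at an arbitrary depth. */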
		if (nsym >= 15) {
			break;
		}
	}
}

#endif	/* _AMD64_ASAN_H_ */