/*	$NetBSD: msan.h,v 1.8 2022/09/13 09:39:49 riastradh Exp $	*/

/*
 * Copyright (c) 2019-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the KMSAN subsystem of the NetBSD kernel.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef	_AMD64_MSAN_H_
#define	_AMD64_MSAN_H_

#include <sys/ksyms.h>

#include <uvm/uvm.h>

#include <machine/pmap.h>
#include <machine/pmap_private.h>
#include <machine/pte.h>
#include <machine/vmparam.h>

#include <x86/bootspace.h>

#ifdef __HAVE_PCPU_AREA
#error "PCPU area not allowed with KMSAN"
#endif
#ifdef __HAVE_DIRECT_MAP
#error "DMAP not allowed with KMSAN"
#endif

/*
 * One big shadow, divided into two sub-shadows (SHAD and ORIG), themselves
 * divided into two regions (MAIN and KERN).
 */

#define __MD_SHADOW_SIZE	0x20000000000ULL	/* 4 * NBPD_L4 */
#define __MD_SHADOW_START	(VA_SIGN_NEG((L4_SLOT_KMSAN * NBPD_L4)))
#define __MD_SHADOW_END		(__MD_SHADOW_START + __MD_SHADOW_SIZE)

#define __MD_SHAD_MAIN_START	(__MD_SHADOW_START)
#define __MD_SHAD_KERN_START	(__MD_SHADOW_START + 0x8000000000ULL)

#define __MD_ORIG_MAIN_START	(__MD_SHAD_KERN_START + 0x8000000000ULL)
#define __MD_ORIG_KERN_START	(__MD_ORIG_MAIN_START + 0x8000000000ULL)

#define __MD_PTR_BASE		0xFFFFFFFF80000000ULL
#define __MD_ORIG_TYPE		__BITS(31,28)

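/*
 * Resulting layout, given that NBPD_L4 == 0x8000000000 (512GB, one L4
 * slot) on amd64, so the whole shadow spans four consecutive L4 slots:
 *
 *	__MD_SHAD_MAIN_START	+0x00000000000	SHAD of the kernel map
 *	__MD_SHAD_KERN_START	+0x08000000000	SHAD of the kernel image
 *	__MD_ORIG_MAIN_START	+0x10000000000	ORIG of the kernel map
 *	__MD_ORIG_KERN_START	+0x18000000000	ORIG of the kernel image
 *
 * The two functions below translate a kernel address to its SHAD and
 * ORIG bytes by linear offset from the matching region base.
 */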
static inline int8_t *
kmsan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;

	if (va >= vm_min_kernel_address && va < vm_max_kernel_address) {
		return (int8_t *)(__MD_SHAD_MAIN_START + (va - vm_min_kernel_address));
	} else if (va >= KERNBASE) {
		return (int8_t *)(__MD_SHAD_KERN_START + (va - KERNBASE));
	} else {
		panic("%s: impossible, va=%p", __func__, (void *)va);
	}
}

static inline int8_t *
kmsan_md_addr_to_orig(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;

	if (va >= vm_min_kernel_address && va < vm_max_kernel_address) {
		return (int8_t *)(__MD_ORIG_MAIN_START + (va - vm_min_kernel_address));
	} else if (va >= KERNBASE) {
		return (int8_t *)(__MD_ORIG_KERN_START + (va - KERNBASE));
	} else {
		panic("%s: impossible, va=%p", __func__, (void *)va);
	}
}
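/*
 * Worked example (with an illustrative vm_min_kernel_address of
 * 0xffff800000000000): va = 0xffff800000001000 falls in the kernel
 * map, so its shadow byte is at __MD_SHAD_MAIN_START + 0x1000 and its
 * origin at __MD_ORIG_MAIN_START + 0x1000.  va = KERNBASE + 0x200000
 * falls in the kernel image, giving __MD_SHAD_KERN_START + 0x200000
 * and __MD_ORIG_KERN_START + 0x200000.
 */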

static inline bool
kmsan_md_unsupported(vaddr_t addr)
{
	return (addr >= (vaddr_t)PTE_BASE &&
	    addr < ((vaddr_t)PTE_BASE + NBPD_L4));
}
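/*
 * The range rejected above is the recursive page-table slot (PTE_BASE,
 * one L4 slot wide): accesses through it are pmap-internal and have no
 * shadow mapping, so KMSAN leaves them untracked.
 */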

static inline paddr_t
__md_palloc(void)
{
	/* The page is zeroed. */
	return pmap_get_physpage();
}

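/*
 * Attempt to allocate a physically contiguous, 2MB-aligned 2MB chunk,
 * to be mapped below as an L2 large page.  The kmsan_init_arg() call
 * marks the shadow of the outgoing call arguments (the full
 * uvm_pglistalloc() parameter list) as initialized, so the allocator
 * itself does not raise a false positive.
 */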
static inline paddr_t
__md_palloc_large(void)
{
	struct pglist pglist;
	int ret;

	if (!uvm.page_init_done)
		return 0;

	kmsan_init_arg(sizeof(psize_t) + 4 * sizeof(paddr_t) +
	    sizeof(struct pglist *) + 2 * sizeof(int));
	ret = uvm_pglistalloc(NBPD_L2, 0, ~0UL, NBPD_L2, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return 0;

	/* The page may not be zeroed. */
	return VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
}

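/*
 * Install a shadow mapping for va: walk the page tables through the
 * recursive mapping (L4_BASE/L3_BASE/L2_BASE/L1_BASE) and allocate any
 * missing intermediate level.  At L2, a 2MB large page is preferred
 * when __md_palloc_large() succeeds; that chunk is not pre-zeroed,
 * hence the explicit memset after the barrier.
 */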
static void
kmsan_md_shadow_map_page(vaddr_t va)
{
	const pt_entry_t pteflags = PTE_W | pmap_pg_nx | PTE_P;
	paddr_t pa;

	KASSERT(va >= __MD_SHADOW_START && va < __MD_SHADOW_END);

	if (!pmap_valid_entry(L4_BASE[pl4_i(va)])) {
		pa = __md_palloc();
		L4_BASE[pl4_i(va)] = pa | pteflags;
	}
	if (!pmap_valid_entry(L3_BASE[pl3_i(va)])) {
		pa = __md_palloc();
		L3_BASE[pl3_i(va)] = pa | pteflags;
	}
	if (!pmap_valid_entry(L2_BASE[pl2_i(va)])) {
		if ((pa = __md_palloc_large()) != 0) {
			L2_BASE[pl2_i(va)] = pa | pteflags | PTE_PS |
			    pmap_pg_g;
			__insn_barrier();
			__builtin_memset((void *)va, 0, NBPD_L2);
			return;
		}
		pa = __md_palloc();
		L2_BASE[pl2_i(va)] = pa | pteflags;
	} else if (L2_BASE[pl2_i(va)] & PTE_PS) {
		return;
	}
	if (!pmap_valid_entry(L1_BASE[pl1_i(va)])) {
		pa = __md_palloc();
		L1_BASE[pl1_i(va)] = pa | pteflags | pmap_pg_g;
	}
}

static void
kmsan_md_init(void)
{
	extern struct bootspace bootspace;
	size_t i;

	CTASSERT((__MD_SHADOW_SIZE / NBPD_L4) == NL4_SLOT_KMSAN);

	/* Kernel. */
	for (i = 0; i < BTSPACE_NSEGS; i++) {
		if (bootspace.segs[i].type == BTSEG_NONE) {
			continue;
		}
		kmsan_shadow_map((void *)bootspace.segs[i].va,
		    bootspace.segs[i].sz);
	}

	/* Boot region. */
	kmsan_shadow_map((void *)bootspace.boot.va, bootspace.boot.sz);

	/* Module map. */
	kmsan_shadow_map((void *)bootspace.smodule,
	    (size_t)(bootspace.emodule - bootspace.smodule));

	/* The bootstrap spare va. */
	kmsan_shadow_map((void *)bootspace.spareva, PAGE_SIZE);
}

static inline msan_orig_t
kmsan_md_orig_encode(int type, uintptr_t ptr)
{
	msan_orig_t ret;

	ret = (ptr & 0xFFFFFFFF) & ~__MD_ORIG_TYPE;
	ret |= __SHIFTIN(type, __MD_ORIG_TYPE);

	return ret;
}

static inline void
kmsan_md_orig_decode(msan_orig_t orig, int *type, uintptr_t *ptr)
{
	*type = __SHIFTOUT(orig, __MD_ORIG_TYPE);
	*ptr = (uintptr_t)(orig & ~__MD_ORIG_TYPE) | __MD_PTR_BASE;
}
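/*
 * Example round trip: ptr = 0xffffffff80123456, type = 2.
 *
 *	encode:	(0x80123456 & ~0xF0000000) | (2 << 28) = 0x20123456
 *	decode:	type = 0x2
 *		ptr  = 0x00123456 | __MD_PTR_BASE = 0xffffffff80123456
 *
 * The round trip is exact only for pointers whose low-word bits 31:28
 * are 0x8, i.e. the first 256MB above __MD_PTR_BASE, which covers the
 * kernel image.
 */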

static inline bool
kmsan_md_is_pc(uintptr_t ptr)
{
	extern uint8_t __rodata_start;

	return (ptr < (uintptr_t)&__rodata_start);
}
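/*
 * This assumes the usual amd64 kernel image layout, where .text sits
 * below __rodata_start: any address under it is taken to be a program
 * counter.
 */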

static inline bool
__md_unwind_end(const char *name)
{
	if (!strcmp(name, "syscall") ||
	    !strcmp(name, "alltraps") ||
	    !strcmp(name, "handle_syscall") ||
	    !strncmp(name, "Xtrap", 5) ||
	    !strncmp(name, "Xintr", 5) ||
	    !strncmp(name, "Xhandle", 7) ||
	    !strncmp(name, "Xresume", 7) ||
	    !strncmp(name, "Xstray", 6) ||
	    !strncmp(name, "Xhold", 5) ||
	    !strncmp(name, "Xrecurse", 8) ||
	    !strcmp(name, "Xdoreti") ||
	    !strncmp(name, "Xsoft", 5)) {
		return true;
	}

	return false;
}
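/*
 * The names above are assembly entry points for syscalls, traps and
 * interrupts; once the unwinder reaches one of them, it is at the
 * bottom of the kernel call stack and can stop.
 */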

/*
 * Print a backtrace by walking the frame-pointer chain: each frame
 * holds the caller's %rbp at 0(%rbp) and the return address at
 * 8(%rbp).  Stop at a kernel entry point, an invalid frame, or after
 * 15 frames.
 */
static void
kmsan_md_unwind(void)
{
	uint64_t *rbp, rip;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	rbp = (uint64_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/* 8(%rbp) contains the saved %rip. */
		rip = *(rbp + 1);

		if (rip < KERNBASE) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)rip, KSYMS_PROC);
		if (error) {
			break;
		}
		kmsan_printf("#%zu %p in %s <%s>\n", nsym, (void *)rip, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		rbp = (uint64_t *)*(rbp);
		if (rbp == 0) {
			break;
		}
		nsym++;

		if (nsym >= 15) {
			break;
		}
	}
}

#endif	/* _AMD64_MSAN_H_ */