/*	$NetBSD: asan.h,v 1.10 2020/09/05 16:30:10 riastradh Exp $	*/

/*
 * Copyright (c) 2018-2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/atomic.h>
#include <sys/ksyms.h>

#include <uvm/uvm.h>

#include <aarch64/pmap.h>
#include <aarch64/vmparam.h>
#include <aarch64/cpufunc.h>
#include <aarch64/armreg.h>
#include <aarch64/machdep.h>

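/*
 * Shadow geometry: one shadow byte tracks 2^KASAN_SHADOW_SCALE_SHIFT bytes
 * of kernel memory, so the shadow region covers 1/2^KASAN_SHADOW_SCALE_SHIFT
 * of the 2^__MD_VIRTUAL_SHIFT-byte kernel address space and starts at the
 * end of KSEG.
 */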
#define __MD_VIRTUAL_SHIFT	48	/* 49-bit address space, cut in half */
#define __MD_KERNMEM_BASE	0xFFFF000000000000 /* kern mem base address */

#define __MD_SHADOW_SIZE	(1ULL << (__MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_MD_SHADOW_START	(AARCH64_KSEG_END)
#define KASAN_MD_SHADOW_END	(KASAN_MD_SHADOW_START + __MD_SHADOW_SIZE)

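/*
 * True until kasan_md_early_init() has run; selects the bootstrap page
 * allocator and the physical-to-virtual translation used when walking the
 * page tables below.
 */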
static bool __md_early __read_mostly = true;

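/*
 * Translate a kernel address into the address of the shadow byte that
 * tracks it.  For example, __MD_KERNMEM_BASE + 0x1000 maps to
 * KASAN_MD_SHADOW_START + (0x1000 >> KASAN_SHADOW_SCALE_SHIFT).
 */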
static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;
	return (int8_t *)(KASAN_MD_SHADOW_START +
	    ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}

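/*
 * Addresses outside [VM_MIN_KERNEL_ADDRESS, VM_KERNEL_IO_ADDRESS) are not
 * sanitized.
 */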
static inline bool
kasan_md_unsupported(vaddr_t addr)
{
	return (addr < VM_MIN_KERNEL_ADDRESS) ||
	    (addr >= VM_KERNEL_IO_ADDRESS);
}

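/*
 * Allocate one zeroed physical page for the shadow: from the bootstrap
 * allocator while __md_early is set, from the kernel pmap's PDP allocator
 * afterwards.
 */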
static paddr_t
__md_palloc(void)
{
	paddr_t pa;

	if (__predict_false(__md_early))
		pa = (paddr_t)pmapboot_pagealloc();
	else
		pa = pmap_alloc_pdp(pmap_kernel(), NULL, 0, false);

	/* The page is zeroed. */
	return pa;
}

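/*
 * Try to allocate a physically contiguous, L2_SIZE-aligned chunk, so that
 * the shadow can be mapped with a single L2 block.  Returns 0 if uvm is not
 * initialized yet or the allocation fails.
 */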
static inline paddr_t
__md_palloc_large(void)
{
	struct pglist pglist;
	int ret;

	if (!uvm.page_init_done)
		return 0;

	ret = uvm_pglistalloc(L2_SIZE, 0, ~0UL, L2_SIZE, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return 0;

	/* The page may not be zeroed. */
	return VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
}

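/*
 * Map one shadow page at 'va', walking the kernel (TTBR1) page tables and
 * allocating any missing intermediate level.  When a large, contiguous
 * allocation is available, the shadow is mapped with an L2 block instead
 * of individual L3 pages.
 */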
static void
kasan_md_shadow_map_page(vaddr_t va)
{
	pd_entry_t *l0, *l1, *l2, *l3;
	paddr_t l0pa, pa;
	pd_entry_t pde;
	size_t idx;

	l0pa = reg_ttbr1_el1_read();
	if (__predict_false(__md_early)) {
		l0 = (void *)KERN_PHYSTOV(l0pa);
	} else {
		l0 = (void *)AARCH64_PA_TO_KVA(l0pa);
	}

	idx = l0pde_index(va);
	pde = l0[idx];
	if (!l0pde_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l0[idx], pa | L0_TABLE);
	} else {
		pa = l0pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l1 = (void *)KERN_PHYSTOV(pa);
	} else {
		l1 = (void *)AARCH64_PA_TO_KVA(pa);
	}

	idx = l1pde_index(va);
	pde = l1[idx];
	if (!l1pde_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l1[idx], pa | L1_TABLE);
	} else {
		pa = l1pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l2 = (void *)KERN_PHYSTOV(pa);
	} else {
		l2 = (void *)AARCH64_PA_TO_KVA(pa);
	}

	idx = l2pde_index(va);
	pde = l2[idx];
	if (!l2pde_valid(pde)) {
		/* If possible, use L2_BLOCK to map it in advance. */
		if ((pa = __md_palloc_large()) != 0) {
			atomic_swap_64(&l2[idx], pa | L2_BLOCK |
			    LX_BLKPAG_UXN | LX_BLKPAG_PXN | LX_BLKPAG_AF |
			    LX_BLKPAG_SH_IS | LX_BLKPAG_AP_RW);
			aarch64_tlbi_by_va(va);
			__builtin_memset((void *)va, 0, L2_SIZE);
			return;
		}
		pa = __md_palloc();
		atomic_swap_64(&l2[idx], pa | L2_TABLE);
	} else if (l2pde_is_block(pde)) {
		/* This VA is already mapped as a block. */
		return;
	} else {
		pa = l2pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l3 = (void *)KERN_PHYSTOV(pa);
	} else {
		l3 = (void *)AARCH64_PA_TO_KVA(pa);
	}

	idx = l3pte_index(va);
	pde = l3[idx];
	if (!l3pte_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l3[idx], pa | L3_PAGE | LX_BLKPAG_UXN |
		    LX_BLKPAG_PXN | LX_BLKPAG_AF | LX_BLKPAG_SH_IS |
		    LX_BLKPAG_AP_RW);
		aarch64_tlbi_by_va(va);
	}
}

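/*
 * Map the shadow for the early kernel stack (USPACE bytes), then switch
 * from the bootstrap allocator to the regular one.
 */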
static void
kasan_md_early_init(void *stack)
{
	kasan_shadow_map(stack, USPACE);
	__md_early = false;
}

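/* Map the shadow for the kernel VA range already allocated at this point. */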
static void
kasan_md_init(void)
{
	vaddr_t eva, dummy;

	CTASSERT((__MD_SHADOW_SIZE / L0_SIZE) == 64);

	/* The VAs we've created until now. */
	pmap_virtual_space(&eva, &dummy);
	kasan_shadow_map((void *)VM_MIN_KERNEL_ADDRESS,
	    eva - VM_MIN_KERNEL_ADDRESS);
}

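/* Stop the unwinder once an exception entry point is reached. */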
static inline bool
__md_unwind_end(const char *name)
{
	if (!strncmp(name, "el0_trap", 8) ||
	    !strncmp(name, "el1_trap", 8)) {
		return true;
	}

	return false;
}

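/*
 * Print a backtrace of at most 15 frames by following the saved frame
 * pointer chain, stopping at unresolvable addresses or at the trap entry.
 */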
static void
kasan_md_unwind(void)
{
	uint64_t lr, *fp;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	fp = (uint64_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/*
		 * normal stack frame
		 *  fp[0]  saved fp(x29) value
		 *  fp[1]  saved lr(x30) value
		 */
		lr = fp[1];

		if (lr < VM_MIN_KERNEL_ADDRESS) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)lr, KSYMS_PROC);
		if (error) {
			break;
		}
		printf("#%zu %p in %s <%s>\n", nsym, (void *)lr, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		fp = (uint64_t *)fp[0];
		if (fp == NULL) {
			break;
		}
		nsym++;

		if (nsym >= 15) {
			break;
		}
	}
}