/*	$NetBSD: asan.h,v 1.7 2020/06/23 17:21:55 maxv Exp $	*/

/*
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/atomic.h>
#include <sys/ksyms.h>

#include <aarch64/pmap.h>
#include <aarch64/vmparam.h>
#include <aarch64/cpufunc.h>
#include <aarch64/armreg.h>
#include <aarch64/machdep.h>

#define __MD_VIRTUAL_SHIFT	48	/* 49-bit address space, cut in half */
#define __MD_KERNMEM_BASE	0xFFFF000000000000 /* kernel memory base address */

#define __MD_SHADOW_SIZE	(1ULL << (__MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_MD_SHADOW_START	(AARCH64_KSEG_END)
#define KASAN_MD_SHADOW_END	(KASAN_MD_SHADOW_START + __MD_SHADOW_SIZE)

static bool __md_early __read_mostly = true;

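/*
 * Convert a kernel virtual address to the address of its shadow byte.
 * One shadow byte tracks 2^KASAN_SHADOW_SCALE_SHIFT bytes of kernel
 * memory (8, the usual KASAN granule), hence the shift applied to the
 * offset from __MD_KERNMEM_BASE.
 */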
static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;
	return (int8_t *)(KASAN_MD_SHADOW_START +
	    ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}

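/*
 * Only the kernel VM range [VM_MIN_KERNEL_ADDRESS, VM_KERNEL_IO_ADDRESS)
 * has shadow backing it; addresses outside it cannot be sanitized.
 */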
static inline bool
kasan_md_unsupported(vaddr_t addr)
{
	return (addr < VM_MIN_KERNEL_ADDRESS) ||
	    (addr >= VM_KERNEL_IO_ADDRESS);
}

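/*
 * Allocate a physical page, for a page-table or shadow page. While the
 * pmap is not yet up (__md_early), take the page from the bootstrap
 * allocator instead.
 */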
static paddr_t
__md_palloc(void)
{
	paddr_t pa;

	if (__predict_false(__md_early))
		pa = (paddr_t)bootpage_alloc();
	else
		pa = pmap_alloc_pdp(pmap_kernel(), NULL, 0, false);

	return pa;
}

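/*
 * Map one page of shadow memory at 'va', allocating any missing
 * intermediate page-table levels (L0 to L3) along the way.
 */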
static void
kasan_md_shadow_map_page(vaddr_t va)
{
	pd_entry_t *l0, *l1, *l2, *l3;
	paddr_t l0pa, pa;
	pd_entry_t pde;
	size_t idx;

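	/*
	 * The kernel's root (L0) page table is the one TTBR1_EL1 points
	 * to. While __md_early, physical addresses are turned into
	 * virtual ones via the physical map (KERN_PHYSTOV); afterwards,
	 * via the KVA window (AARCH64_PA_TO_KVA). The same applies at
	 * each level below.
	 */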
	l0pa = reg_ttbr1_el1_read();
	if (__predict_false(__md_early)) {
		l0 = (void *)KERN_PHYSTOV(l0pa);
	} else {
		l0 = (void *)AARCH64_PA_TO_KVA(l0pa);
	}

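	/* Get, or allocate and install, the L1 table for 'va'. */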
	idx = l0pde_index(va);
	pde = l0[idx];
	if (!l0pde_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l0[idx], pa | L0_TABLE);
	} else {
		pa = l0pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l1 = (void *)KERN_PHYSTOV(pa);
	} else {
		l1 = (void *)AARCH64_PA_TO_KVA(pa);
	}

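	/* Same at the next level: get or allocate the L2 table. */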
	idx = l1pde_index(va);
	pde = l1[idx];
	if (!l1pde_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l1[idx], pa | L1_TABLE);
	} else {
		pa = l1pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l2 = (void *)KERN_PHYSTOV(pa);
	} else {
		l2 = (void *)AARCH64_PA_TO_KVA(pa);
	}

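	/* Same again: get or allocate the L3 table. */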
	idx = l2pde_index(va);
	pde = l2[idx];
	if (!l2pde_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l2[idx], pa | L2_TABLE);
	} else {
		pa = l2pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l3 = (void *)KERN_PHYSTOV(pa);
	} else {
		l3 = (void *)AARCH64_PA_TO_KVA(pa);
	}

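	/*
	 * Finally, enter the shadow page itself: a normal, writable,
	 * non-executable (UXN|PXN) kernel page, then flush any stale
	 * TLB entry for 'va'.
	 */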
	idx = l3pte_index(va);
	pde = l3[idx];
	if (!l3pte_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l3[idx], pa | L3_PAGE | LX_BLKPAG_UXN |
		    LX_BLKPAG_PXN | LX_BLKPAG_AF | LX_BLKPAG_SH_IS |
		    LX_BLKPAG_AP_RW);
		aarch64_tlbi_by_va(va);
	}
}

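/*
 * Create the shadow for the initial lwp's stack, then switch from the
 * bootstrap page allocator to the regular pmap one.
 */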
static void
kasan_md_early_init(void *stack)
{
	kasan_shadow_map(stack, USPACE);
	__md_early = false;
}

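/*
 * Create the shadow for all the kernel VAs allocated so far. The
 * CTASSERT verifies that the shadow area covers exactly 64 top-level
 * (L0) page-table slots.
 */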
static void
kasan_md_init(void)
{
	vaddr_t eva, dummy;

	CTASSERT((__MD_SHADOW_SIZE / L0_SIZE) == 64);

	/* The VAs we've created until now. */
	pmap_virtual_space(&eva, &dummy);
	kasan_shadow_map((void *)VM_MIN_KERNEL_ADDRESS,
	    eva - VM_MIN_KERNEL_ADDRESS);
}

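/*
 * The symbols where the stack unwinding should stop: the exception
 * entry points, beyond which there is no kernel frame chain to follow.
 */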
static inline bool
__md_unwind_end(const char *name)
{
	if (!strncmp(name, "el0_trap", 8) ||
	    !strncmp(name, "el1_trap", 8)) {
		return true;
	}

	return false;
}

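/*
 * Print a best-effort backtrace by walking the AArch64 frame-pointer
 * chain, capped at 15 frames.
 */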
static void
kasan_md_unwind(void)
{
	uint64_t lr, *fp;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	fp = (uint64_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/*
		 * Normal stack frame:
		 *  fp[0]  saved fp (x29) value
		 *  fp[1]  saved lr (x30) value
		 */
		lr = fp[1];

		if (lr < VM_MIN_KERNEL_ADDRESS) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)lr, KSYMS_PROC);
		if (error) {
			break;
		}
		printf("#%zu %p in %s <%s>\n", nsym, (void *)lr, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		fp = (uint64_t *)fp[0];
		if (fp == NULL) {
			break;
		}
		nsym++;

		if (nsym >= 15) {
			break;
		}
	}
}