/*	$NetBSD: asan.h,v 1.8 2022/04/02 11:16:07 skrll Exp $	*/

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nick Hudson, and is part of the KASAN subsystem of the NetBSD kernel.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_efi.h"

#include <sys/atomic.h>
#include <sys/ksyms.h>

#include <uvm/uvm.h>

#include <arm/vmparam.h>
#include <arm/arm32/machdep.h>
#include <arm/arm32/pmap.h>

#define KASAN_MD_SHADOW_START	VM_KERNEL_KASAN_BASE
#define KASAN_MD_SHADOW_END	VM_KERNEL_KASAN_END
#define __MD_KERNMEM_BASE	KERNEL_BASE

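/*
 * Convert a kernel VA to the VA of its shadow byte.  Each aligned
 * KASAN_SHADOW_SCALE_SIZE-byte granule of kernel memory is described by
 * one shadow byte.  For example, assuming the usual 8:1 scale
 * (KASAN_SHADOW_SCALE_SHIFT == 3), the granule at
 * __MD_KERNMEM_BASE + 0x100 is described by the shadow byte at
 * KASAN_MD_SHADOW_START + 0x20.
 */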
static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;
	return (int8_t *)(KASAN_MD_SHADOW_START +
	    ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}

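/*
 * Addresses outside [VM_MIN_KERNEL_ADDRESS, KASAN_MD_SHADOW_START) have
 * no shadow mapping: this excludes user space and the shadow region
 * itself from instrumentation.
 */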
static inline bool
kasan_md_unsupported(vaddr_t addr)
{
	return addr < VM_MIN_KERNEL_ADDRESS ||
	    addr >= KASAN_MD_SHADOW_START;
}

/* -------------------------------------------------------------------------- */

/*
 * Early mapping, used to map just the stack at boot time. We rely on the fact
 * that VA = PA + KERNEL_BASE.
 */

/*
 * KASAN_NEARLYPAGES is hard to work out.
 *
 * The INIT_ARM_TOTAL_STACK shadow is reduced by the KASAN_SHADOW_SCALE_SIZE
 * factor. This shadow mapping is likely to span more than one L2 page table
 * and, as a result, more than one PAGE_SIZE block. The L2 page tables might
 * span more than one L1 page table entry as well.
 *
 * To ensure we have enough, start with the assumption of one L1 page table
 * plus the number of pages needed to map the shadow, then double it for the
 * spanning described above.
 */

#define KASAN_NEARLYPAGES	\
    (2 * (1 + howmany(INIT_ARM_TOTAL_STACK / KASAN_SHADOW_SCALE_SIZE, PAGE_SIZE)))
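
/*
 * A worked example, using hypothetical values of a 32KiB
 * INIT_ARM_TOTAL_STACK, 4KiB pages and an 8:1 shadow scale: the stack
 * shadow is 32768 / 8 = 4096 bytes, howmany(4096, 4096) == 1, and so
 * KASAN_NEARLYPAGES == 2 * (1 + 1) == 4 early pages.
 */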

static bool __md_early __read_mostly;
static size_t __md_nearlyl1pts __attribute__((__section__(".data"))) = 0;
static size_t __md_nearlypages __attribute__((__section__(".data")));
static uint8_t __md_earlypages[KASAN_NEARLYPAGES * PAGE_SIZE]
    __aligned(PAGE_SIZE)  __attribute__((__section__(".data")));

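/*
 * Allocate one physical page for shadow or page-table backing, in three
 * phases: while __md_early, hand out pages from the static
 * __md_earlypages pool; after that, but before uvm_page_init() has
 * finished, steal pages with uvm_page_physget(); once the VM system is
 * fully up, use uvm_pagealloc(), waiting in uvm_wait() until a page is
 * available.
 */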
static vaddr_t
__md_palloc(void)
{
	paddr_t pa;

	if (__predict_false(__md_early)) {
		KASSERTMSG(__md_nearlypages < KASAN_NEARLYPAGES,
		    "__md_nearlypages %zu", __md_nearlypages);

		vaddr_t va = (vaddr_t)(&__md_earlypages[0] + __md_nearlypages * PAGE_SIZE);
		__md_nearlypages++;
		__builtin_memset((void *)va, 0, PAGE_SIZE);

		return KERN_VTOPHYS(va);
	}

	if (!uvm.page_init_done) {
		if (uvm_page_physget(&pa) == false)
			panic("KASAN can't get a page");

		return pa;
	}

	struct vm_page *pg;
retry:
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL) {
		uvm_wait(__func__);
		goto retry;
	}
	pa = VM_PAGE_TO_PHYS(pg);

	return pa;
}

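/*
 * Ensure the shadow page containing va is mapped, allocating and
 * installing an L2 page table first if the covering L1 slot does not
 * point at one yet.
 */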
static void
kasan_md_shadow_map_page(vaddr_t va)
{
	const uint32_t mask = L1_TABLE_SIZE - 1;
	const paddr_t ttb = (paddr_t)(armreg_ttbr1_read() & ~mask);
	pd_entry_t * const pdep = (pd_entry_t *)KERN_PHYSTOV(ttb);

	const size_t l1slot = l1pte_index(va);
	vaddr_t l2ptva;

	KASSERT((va & PAGE_MASK) == 0);

	extern bool kasan_l2pts_created;
	if (__predict_true(kasan_l2pts_created)) {
		/*
		 * The shadow map area L2PTs were allocated and mapped
		 * by arm32_kernel_vm_init.  Use the array of pv_addr_t
		 * to get the l2ptva.
		 */
		extern pv_addr_t kasan_l2pt[];
		const size_t off = va - KASAN_MD_SHADOW_START;
		const size_t segoff = off & (L2_S_SEGSIZE - 1);
		const size_t idx = off / L2_S_SEGSIZE;
		const vaddr_t segl2ptva = kasan_l2pt[idx].pv_va;
		l2ptva = segl2ptva + l1pte_index(segoff) * L2_TABLE_SIZE_REAL;
	} else {
		/*
		 * An L1PT entry may be required for the bootstrap tables.
		 * As one page gives enough space for multiple L2PTs, a
		 * previous call might already have created the L2PT.
		 */
		if (!l1pte_page_p(pdep[l1slot])) {
			const paddr_t l2ptpa = __md_palloc();
			const vaddr_t segl2va = va & -L2_S_SEGSIZE;
			const size_t segl1slot = l1pte_index(segl2va);

			__md_nearlyl1pts++;

			const pd_entry_t npde =
			    L1_C_PROTO | l2ptpa | L1_C_DOM(PMAP_DOMAIN_KERNEL);

			l1pte_set(pdep + segl1slot, npde);
			/*
			 * No need for PDE_SYNC_RANGE here as we're creating
			 * the bootstrap tables.
			 */
		}
		l2ptva = KERN_PHYSTOV(l1pte_pa(pdep[l1slot]));
	}

	pt_entry_t * l2pt = (pt_entry_t *)l2ptva;
	pt_entry_t * const ptep = &l2pt[l2pte_index(va)];

	if (!l2pte_valid_p(*ptep)) {
		const int prot = VM_PROT_READ | VM_PROT_WRITE;
		const paddr_t pa = __md_palloc();
		pt_entry_t npte =
		    L2_S_PROTO |
		    pa |
		    (__md_early ? 0 : pte_l2_s_cache_mode_pt) |
		    L2_S_PROT(PTE_KERNEL, prot);
		l2pte_set(ptep, npte, 0);

		if (!__md_early)
			PTE_SYNC(ptep);

		__builtin_memset((void *)va, 0, PAGE_SIZE);
	}
}

/*
 * Map the init stacks of the BP and APs. We will map the rest in kasan_init.
 */
static void
kasan_md_early_init(void *stack)
{

	/*
	 * We come through here twice.  The first time is for generic_start
	 * and the bootstrap tables.  The second is for arm32_kernel_vm_init
	 * and the real tables.
	 *
	 * In the first we have to create L1PT entries, whereas in the
	 * second arm32_kernel_vm_init has set up kasan_l1pts (and the L1PT
	 * entries for them).
	 */
	__md_early = true;
	__md_nearlypages = __md_nearlyl1pts;
	kasan_shadow_map(stack, INIT_ARM_TOTAL_STACK);
	__md_early = false;
}

static void
kasan_md_init(void)
{
	extern vaddr_t kasan_kernelstart;
	extern vaddr_t kasan_kernelsize;

	kasan_shadow_map((void *)kasan_kernelstart, kasan_kernelsize);

	/* The VAs we've created until now. */
	vaddr_t eva = pmap_growkernel(VM_KERNEL_VM_BASE);
	kasan_shadow_map((void *)VM_KERNEL_VM_BASE, eva - VM_KERNEL_VM_BASE);
}

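/*
 * Return true if the symbol names one of the exception vector entry
 * points, i.e. the bottom of a kernel backtrace.
 */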
static inline bool
__md_unwind_end(const char *name)
{
	static const char * const vectors[] = {
		"undefined_entry",
		"swi_entry",
		"prefetch_abort_entry",
		"data_abort_entry",
		"address_exception_entry",
		"irq_entry",
		"fiqvector"
	};

	for (size_t i = 0; i < __arraycount(vectors); i++) {
		if (!strncmp(name, vectors[i], strlen(vectors[i])))
			return true;
	}

	return false;
}

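/*
 * Print a backtrace of the current stack by walking the APCS frame
 * chain, stopping at an exception entry point, an unresolvable return
 * address, a NULL frame pointer, or after 15 frames.
 */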
static void
kasan_md_unwind(void)
{
	uint32_t lr, *fp;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	fp = (uint32_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/*
		 * normal frame
		 *  fp[ 0] saved code pointer
		 *  fp[-1] saved lr value
		 *  fp[-2] saved sp value
		 *  fp[-3] saved fp value
		 */
		lr = fp[-1];

		if (lr < VM_MIN_KERNEL_ADDRESS) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)lr, KSYMS_PROC);
		if (error) {
			break;
		}
		printf("#%zu %p in %s <%s>\n", nsym, (void *)lr, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		fp = (uint32_t *)fp[-3];
		if (fp == NULL) {
			break;
		}
		nsym++;

		if (nsym >= 15) {
			break;
		}
	}
}
    298