/*	$NetBSD: asan.h,v 1.17.2.1 2021/05/13 00:47:20 thorpej Exp $	*/

/*
 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the KASAN subsystem of the NetBSD kernel.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/atomic.h>
#include <sys/ksyms.h>

#include <uvm/uvm.h>

#include <aarch64/pmap.h>
#include <aarch64/vmparam.h>
#include <aarch64/armreg.h>
#include <aarch64/machdep.h>

#include <arm/cpufunc.h>

#define __MD_VIRTUAL_SHIFT	48	/* 49-bit address space, cut in half */
#define __MD_KERNMEM_BASE	0xFFFF000000000000 /* kernel memory base address */

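/*
 * The shadow: one shadow byte tracks 2^KASAN_SHADOW_SCALE_SHIFT bytes of
 * kernel memory. The shadow region sits right after the direct map.
 */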
#define __MD_SHADOW_SIZE	(1ULL << (__MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_MD_SHADOW_START	(AARCH64_DIRECTMAP_END)
#define KASAN_MD_SHADOW_END	(KASAN_MD_SHADOW_START + __MD_SHADOW_SIZE)

/* Set to false by kasan_md_early_init(), once early bootstrap is over. */
static bool __md_early __read_mostly = true;

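/*
 * Return the shadow byte tracking the given kernel address: the offset
 * from the kernel base, scaled down by KASAN_SHADOW_SCALE_SHIFT and
 * relocated into the shadow region.
 */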
static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;
	return (int8_t *)(KASAN_MD_SHADOW_START +
	    ((va - __MD_KERNMEM_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}

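/*
 * Addresses outside of [VM_MIN_KERNEL_ADDRESS, VM_KERNEL_IO_ADDRESS)
 * have no shadow and are not sanitized.
 */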
static inline bool
kasan_md_unsupported(vaddr_t addr)
{
	return (addr < VM_MIN_KERNEL_ADDRESS) ||
	    (addr >= VM_KERNEL_IO_ADDRESS);
}

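/*
 * Allocate one physical page for the shadow, using whichever allocator
 * is available at the current boot stage: pmapboot_pagealloc() very
 * early, uvm_pageboot_alloc() before the page subsystem is up, and
 * uvm_pagealloc() afterwards.
 */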
static paddr_t
__md_palloc(void)
{
	paddr_t pa;

	if (__predict_false(__md_early)) {
		pa = (paddr_t)pmapboot_pagealloc();
		return pa;
	}

	vaddr_t va;
	if (!uvm.page_init_done) {
		va = uvm_pageboot_alloc(PAGE_SIZE);
		pa = AARCH64_KVA_TO_PA(va);
	} else {
		struct vm_page *pg;
retry:
		pg = uvm_pagealloc(NULL, 0, NULL, 0);
		if (pg == NULL) {
			uvm_wait(__func__);
			goto retry;
		}

		pa = VM_PAGE_TO_PHYS(pg);
		va = AARCH64_PA_TO_KVA(pa);
	}

	__builtin_memset((void *)va, 0, PAGE_SIZE);
	return pa;
}

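/*
 * Try to allocate a physically contiguous, L2_SIZE-aligned chunk that
 * can be mapped with a single L2 block entry. Returns 0 on failure.
 */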
static inline paddr_t
__md_palloc_large(void)
{
	struct pglist pglist;
	int ret;

	if (!uvm.page_init_done)
		return 0;

	ret = uvm_pglistalloc(L2_SIZE, 0, ~0UL, L2_SIZE, 0,
	    &pglist, 1, 0);
	if (ret != 0)
		return 0;

	/* The pages may not be zeroed; the caller zeroes them via the mapping. */
	return VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
}

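/*
 * Walk the L0-L3 page tables for the given shadow VA, allocating any
 * missing intermediate level, and enter the final mapping. Before the
 * direct map is usable (__md_early), table PAs are turned into VAs with
 * KERN_PHYSTOV instead of AARCH64_PA_TO_KVA.
 */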
static void
kasan_md_shadow_map_page(vaddr_t va)
{
	pd_entry_t *l0, *l1, *l2, *l3;
	paddr_t l0pa, pa;
	pd_entry_t pde;
	size_t idx;

	l0pa = reg_ttbr1_el1_read();
	if (__predict_false(__md_early)) {
		l0 = (void *)KERN_PHYSTOV(l0pa);
	} else {
		l0 = (void *)AARCH64_PA_TO_KVA(l0pa);
	}

	idx = l0pde_index(va);
	pde = l0[idx];
	if (!l0pde_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l0[idx], pa | L0_TABLE);
	} else {
		pa = l0pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l1 = (void *)KERN_PHYSTOV(pa);
	} else {
		l1 = (void *)AARCH64_PA_TO_KVA(pa);
	}

	idx = l1pde_index(va);
	pde = l1[idx];
	if (!l1pde_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l1[idx], pa | L1_TABLE);
	} else {
		pa = l1pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l2 = (void *)KERN_PHYSTOV(pa);
	} else {
		l2 = (void *)AARCH64_PA_TO_KVA(pa);
	}

	idx = l2pde_index(va);
	pde = l2[idx];
	if (!l2pde_valid(pde)) {
		/* If possible, use L2_BLOCK to map it in advance. */
		if ((pa = __md_palloc_large()) != 0) {
			atomic_swap_64(&l2[idx], pa | L2_BLOCK |
			    LX_BLKPAG_UXN | LX_BLKPAG_PXN | LX_BLKPAG_AF |
			    LX_BLKPAG_SH_IS | LX_BLKPAG_AP_RW);
			aarch64_tlbi_by_va(va);
			__builtin_memset((void *)va, 0, L2_SIZE);
			return;
		}
		pa = __md_palloc();
		atomic_swap_64(&l2[idx], pa | L2_TABLE);
	} else if (l2pde_is_block(pde)) {
		/* This VA is already mapped as a block. */
		return;
	} else {
		pa = l2pde_pa(pde);
	}
	if (__predict_false(__md_early)) {
		l3 = (void *)KERN_PHYSTOV(pa);
	} else {
		l3 = (void *)AARCH64_PA_TO_KVA(pa);
	}

	idx = l3pte_index(va);
	pde = l3[idx];
	if (!l3pte_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l3[idx], pa | L3_PAGE | LX_BLKPAG_UXN |
		    LX_BLKPAG_PXN | LX_BLKPAG_AF | LX_BLKPAG_SH_IS |
		    LX_BLKPAG_AP_RW | LX_BLKPAG_ATTR_NORMAL_WB);
	}
	/* Make the page-table updates visible before the shadow is used. */
	dsb(ishst);
	isb();
}

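/*
 * Create the shadow for the early boot stack, then leave the early phase
 * so that later allocations go through UVM.
 */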
static void
kasan_md_early_init(void *stack)
{
	kasan_shadow_map(stack, USPACE);
	__md_early = false;
}

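/*
 * Create the shadow for the kernel image and for the kernel VM range
 * populated so far.
 */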
static void
kasan_md_init(void)
{

	/* The shadow must span exactly 64 L0 (512GB) slots. */
	CTASSERT((__MD_SHADOW_SIZE / L0_SIZE) == 64);

	extern vaddr_t kasan_kernelstart;
	extern vaddr_t kasan_kernelsize;

	kasan_shadow_map((void *)kasan_kernelstart, kasan_kernelsize);

	/* The VAs we've created until now. */
	vaddr_t eva = pmap_growkernel(VM_KERNEL_VM_BASE);
	kasan_shadow_map((void *)VM_KERNEL_VM_BASE, eva - VM_KERNEL_VM_BASE);
}

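/*
 * Stop unwinding once we reach an exception-entry frame.
 */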
static inline bool
__md_unwind_end(const char *name)
{
	if (!strncmp(name, "el0_trap", 8) ||
	    !strncmp(name, "el1_trap", 8)) {
		return true;
	}

	return false;
}

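/*
 * Print a backtrace by following the frame-pointer chain, up to 15
 * frames or until an exception-entry frame is reached.
 */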
static void
kasan_md_unwind(void)
{
	uint64_t lr, *fp;
	const char *mod;
	const char *sym;
	size_t nsym;
	int error;

	fp = (uint64_t *)__builtin_frame_address(0);
	nsym = 0;

	while (1) {
		/*
		 * normal stack frame
		 *  fp[0]  saved fp(x29) value
		 *  fp[1]  saved lr(x30) value
		 */
		lr = fp[1];

		if (lr < VM_MIN_KERNEL_ADDRESS) {
			break;
		}
		error = ksyms_getname(&mod, &sym, (vaddr_t)lr, KSYMS_PROC);
		if (error) {
			break;
		}
		printf("#%zu %p in %s <%s>\n", nsym, (void *)lr, sym, mod);
		if (__md_unwind_end(sym)) {
			break;
		}

		fp = (uint64_t *)fp[0];
		if (fp == NULL) {
			break;
		}
		nsym++;

		if (nsym >= 15) {
			break;
		}
	}
}