/*	$NetBSD: asan.h,v 1.1 2018/11/01 20:34:50 maxv Exp $	*/

/*
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/atomic.h>
#include <aarch64/pmap.h>
#include <aarch64/vmparam.h>
#include <aarch64/cpufunc.h>
#include <aarch64/armreg.h>

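/*
 * KASAN shadow layout for AArch64.  Each shadow byte tracks an 8-byte
 * granule of kernel memory (assuming KASAN_SHADOW_SCALE_SHIFT == 3, as
 * in the MI KASAN code), so the shadow window is 1/8th the size of the
 * tracked virtual space.  The kernel owns the upper, sign-extended half
 * of the 49-bit address space: a 2^48-byte region starting at
 * 0xFFFF000000000000.
 */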
#define __MD_VIRTUAL_SHIFT	48	/* 2^48 bytes: the kernel half of the 49-bit address space */
#define __MD_CANONICAL_BASE	0xFFFF000000000000

#define __MD_SHADOW_SIZE	(1ULL << (__MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_MD_SHADOW_START	(AARCH64_KSEG_END)
#define KASAN_MD_SHADOW_END	(KASAN_MD_SHADOW_START + __MD_SHADOW_SIZE)

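/*
 * Return the shadow byte that tracks the 8-byte granule containing
 * 'addr'.  A sketch of the arithmetic, assuming a scale shift of 3:
 * for addr == __MD_CANONICAL_BASE + 0x1000, the offset 0x1000 shifted
 * right by 3 yields the shadow byte at KASAN_MD_SHADOW_START + 0x200.
 */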
static inline int8_t *
kasan_md_addr_to_shad(const void *addr)
{
	vaddr_t va = (vaddr_t)addr;
	return (int8_t *)(KASAN_MD_SHADOW_START +
	    ((va - __MD_CANONICAL_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
}

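/*
 * True if 'addr' falls outside the range KASAN instruments.  Only the
 * regular kernel VA space [VM_MIN_KERNEL_ADDRESS, VM_KERNEL_IO_ADDRESS)
 * is shadowed; device I/O mappings and everything below the kernel map
 * are left unchecked.
 */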
static inline bool
kasan_md_unsupported(vaddr_t addr)
{
	return (addr < VM_MIN_KERNEL_ADDRESS) ||
	    (addr >= VM_KERNEL_IO_ADDRESS);
}

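/*
 * Allocate one physical page, via the kernel pmap's page-table-page
 * allocator.  Used both for intermediate page tables and for the shadow
 * pages themselves.
 */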
static paddr_t
__md_palloc(void)
{
	paddr_t pa;

	pmap_alloc_pdp(pmap_kernel(), &pa);

	return pa;
}

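/*
 * Map one page of shadow memory at 'va'.  Walk the four page-table
 * levels starting from TTBR1_EL1 (the kernel's L0 table), allocating
 * any missing intermediate table along the way, then enter the final
 * read-write, non-executable L3 page.
 */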
static void
kasan_md_shadow_map_page(vaddr_t va)
{
	pd_entry_t *l0, *l1, *l2, *l3;
	paddr_t l0pa, pa;
	pd_entry_t pde;
	size_t idx;

	l0pa = reg_ttbr1_el1_read();
	l0 = (void *)AARCH64_PA_TO_KVA(l0pa);

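	/*
	 * Descend L0 -> L1 -> L2: at each level, reuse the existing
	 * table if the descriptor is valid, otherwise allocate a fresh
	 * table page and install it atomically.
	 */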
	idx = l0pde_index(va);
	pde = l0[idx];
	if (!l0pde_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l0[idx], pa | L0_TABLE);
	} else {
		pa = l0pde_pa(pde);
	}
	l1 = (void *)AARCH64_PA_TO_KVA(pa);

	idx = l1pde_index(va);
	pde = l1[idx];
	if (!l1pde_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l1[idx], pa | L1_TABLE);
	} else {
		pa = l1pde_pa(pde);
	}
	l2 = (void *)AARCH64_PA_TO_KVA(pa);

	idx = l2pde_index(va);
	pde = l2[idx];
	if (!l2pde_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l2[idx], pa | L2_TABLE);
	} else {
		pa = l2pde_pa(pde);
	}
	l3 = (void *)AARCH64_PA_TO_KVA(pa);

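	/*
	 * Enter the leaf PTE: a normal page, writable by the kernel
	 * (AP_RW), never executable (UXN|PXN), with the Access Flag
	 * preset so no access-flag fault is taken on first use.  Any
	 * stale TLB entry for 'va' is then invalidated.
	 */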
	idx = l3pte_index(va);
	pde = l3[idx];
	if (!l3pte_valid(pde)) {
		pa = __md_palloc();
		atomic_swap_64(&l3[idx], pa | L3_PAGE | LX_BLKPAG_UXN |
		    LX_BLKPAG_PXN | LX_BLKPAG_AF | LX_BLKPAG_AP_RW);
		aarch64_tlbi_by_va(va);
	}
}

#define kasan_md_early_init(a)	__nothing

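/*
 * Create the initial shadow mappings: everything from the start of the
 * kernel map up to the first VA still unused at this point, as reported
 * by pmap_virtual_space().  The CTASSERT double-checks that the shadow
 * window spans exactly 64 L0 slots (2^45 / 2^39 == 64, assuming 4KB
 * granules).
 */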
static void
kasan_md_init(void)
{
	vaddr_t eva, dummy;

	CTASSERT((__MD_SHADOW_SIZE / L0_SIZE) == 64);

	/* The VAs we've created until now. */
	pmap_virtual_space(&eva, &dummy);
	kasan_shadow_map((void *)VM_MIN_KERNEL_ADDRESS,
	    eva - VM_MIN_KERNEL_ADDRESS);
}

#define kasan_md_unwind()	__nothing