/*	$NetBSD: asan.h,v 1.1 2018/11/01 20:34:50 maxv Exp $	*/

/*
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
31 1.1 maxv
#include <sys/atomic.h>
#include <aarch64/pmap.h>
#include <aarch64/vmparam.h>
#include <aarch64/cpufunc.h>
#include <aarch64/armreg.h>

/*
 * Machine-dependent KASAN shadow layout for aarch64.
 *
 * The sanitized kernel VA range is the upper (TTBR1) half of the address
 * space, starting at __MD_CANONICAL_BASE.  The shadow covers
 * 2^__MD_VIRTUAL_SHIFT bytes of VA, scaled down by
 * KASAN_SHADOW_SCALE_SHIFT (one shadow byte per 2^scale bytes of VA),
 * and is placed right after KSEG.
 */
#define __MD_VIRTUAL_SHIFT	48	/* 49bit address space, cut half */
#define __MD_CANONICAL_BASE	0xFFFF000000000000

#define __MD_SHADOW_SIZE	(1ULL << (__MD_VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_MD_SHADOW_START	(AARCH64_KSEG_END)
#define KASAN_MD_SHADOW_END	(KASAN_MD_SHADOW_START + __MD_SHADOW_SIZE)
44 1.1 maxv
45 1.1 maxv static inline int8_t *
46 1.1 maxv kasan_md_addr_to_shad(const void *addr)
47 1.1 maxv {
48 1.1 maxv vaddr_t va = (vaddr_t)addr;
49 1.1 maxv return (int8_t *)(KASAN_MD_SHADOW_START +
50 1.1 maxv ((va - __MD_CANONICAL_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
51 1.1 maxv }
52 1.1 maxv
53 1.1 maxv static inline bool
54 1.1 maxv kasan_md_unsupported(vaddr_t addr)
55 1.1 maxv {
56 1.1 maxv return (addr < VM_MIN_KERNEL_ADDRESS) ||
57 1.1 maxv (addr >= VM_KERNEL_IO_ADDRESS);
58 1.1 maxv }
59 1.1 maxv
60 1.1 maxv static paddr_t
61 1.1 maxv __md_palloc(void)
62 1.1 maxv {
63 1.1 maxv paddr_t pa;
64 1.1 maxv
65 1.1 maxv pmap_alloc_pdp(pmap_kernel(), &pa);
66 1.1 maxv
67 1.1 maxv return pa;
68 1.1 maxv }
69 1.1 maxv
/*
 * Ensure a physical page is mapped at shadow address 'va'.
 *
 * Walks the kernel (TTBR1) page tables top-down, allocating and linking
 * any missing intermediate table at each level (L0 -> L1 -> L2), then
 * installs an L3 leaf page if none is present.  Each new entry is
 * published with a single 64-bit atomic_swap_64 store.
 */
static void
kasan_md_shadow_map_page(vaddr_t va)
{
	pd_entry_t *l0, *l1, *l2, *l3;
	paddr_t l0pa, pa;
	pd_entry_t pde;
	size_t idx;

	/* Root of the walk: the kernel's L0 table, from TTBR1_EL1. */
	l0pa = reg_ttbr1_el1_read();
	l0 = (void *)AARCH64_PA_TO_KVA(l0pa);

	idx = l0pde_index(va);
	pde = l0[idx];
	if (!l0pde_valid(pde)) {
		/* No L1 table yet: allocate one and link it in. */
		pa = __md_palloc();
		atomic_swap_64(&l0[idx], pa | L0_TABLE);
	} else {
		pa = l0pde_pa(pde);
	}
	l1 = (void *)AARCH64_PA_TO_KVA(pa);

	idx = l1pde_index(va);
	pde = l1[idx];
	if (!l1pde_valid(pde)) {
		/* No L2 table yet: allocate one and link it in. */
		pa = __md_palloc();
		atomic_swap_64(&l1[idx], pa | L1_TABLE);
	} else {
		pa = l1pde_pa(pde);
	}
	l2 = (void *)AARCH64_PA_TO_KVA(pa);

	idx = l2pde_index(va);
	pde = l2[idx];
	if (!l2pde_valid(pde)) {
		/* No L3 table yet: allocate one and link it in. */
		pa = __md_palloc();
		atomic_swap_64(&l2[idx], pa | L2_TABLE);
	} else {
		pa = l2pde_pa(pde);
	}
	l3 = (void *)AARCH64_PA_TO_KVA(pa);

	idx = l3pte_index(va);
	pde = l3[idx];
	if (!l3pte_valid(pde)) {
		/*
		 * Leaf mapping: kernel RW, accessed, never executable
		 * (UXN|PXN).  Then invalidate the TLB for this VA so the
		 * new entry takes effect.
		 */
		pa = __md_palloc();
		atomic_swap_64(&l3[idx], pa | L3_PAGE | LX_BLKPAG_UXN |
		    LX_BLKPAG_PXN | LX_BLKPAG_AF | LX_BLKPAG_AP_RW);
		aarch64_tlbi_by_va(va);
	}
}
120 1.1 maxv
121 1.1 maxv #define kasan_md_early_init(a) __nothing
122 1.1 maxv
123 1.1 maxv static void
124 1.1 maxv kasan_md_init(void)
125 1.1 maxv {
126 1.1 maxv vaddr_t eva, dummy;
127 1.1 maxv
128 1.1 maxv CTASSERT((__MD_SHADOW_SIZE / L0_SIZE) == 64);
129 1.1 maxv
130 1.1 maxv /* The VAs we've created until now. */
131 1.1 maxv pmap_virtual_space(&eva, &dummy);
132 1.1 maxv kasan_shadow_map((void *)VM_MIN_KERNEL_ADDRESS,
133 1.1 maxv eva - VM_MIN_KERNEL_ADDRESS);
134 1.1 maxv }
135 1.1 maxv
136 1.1 maxv #define kasan_md_unwind() __nothing
137